From 6bcb6c476f364dfae9cd640232e360f10ae28000 Mon Sep 17 00:00:00 2001 From: Nick Hayward Date: Fri, 12 Jan 2018 11:06:06 -0800 Subject: [PATCH] Release (#488) * rename terraform resources. add terraform import lines. add diff supression for deploy.content field. add import system test * remove crash.log * remove comment * update docs, fix unit tests * Terraform: improve handling of sigterm (#301) * rename terraform resources. add terraform import lines. add diff supression for deploy.content field. add import system test * remove crash.log * remove comment * update docs, fix unit tests * add Layer0Client for meta object * use waitWithContext helper functions. Remove partial state from service resource * remove wait * fix merge * check error codes for DoesNotExist in terraform module * return DoesNotExist error codes from backend layer * use 's' for receiver * Return '404 Not Found' errors when resource not found instead of '500 Server Error' errors. * split out system and stress tests * Add leftover tags janitor and handle resolving leftover tags in CLI. (#305) * API: Added Tag Janitor. CLI: Check ecs task exists before attempting to retrieve information on it. * tag janitor review feedback * Updated expiredTasks test * Merge master back into develop (#309) * Create CNAME * rename terraform resources. add terraform import lines. add diff supression for deploy.content field. add import system test * remove crash.log * remove comment * update docs, fix unit tests * Terraform: improve handling of sigterm (#301) * rename terraform resources. add terraform import lines. add diff supression for deploy.content field. add import system test * remove crash.log * remove comment * update docs, fix unit tests * add Layer0Client for meta object * use waitWithContext helper functions. Remove partial state from service resource * remove wait * fix merge * check error codes for DoesNotExist in terraform module * return DoesNotExist error codes from backend layer * use 's' for receiver * Return '404 Not Found' errors when resource not found instead of '500 Server Error' errors. * split out system and stress tests * Add leftover tags janitor and handle resolving leftover tags in CLI. (#305) * API: Added Tag Janitor. CLI: Check ecs task exists before attempting to retrieve information on it. * tag janitor review feedback * Updated expiredTasks test * update docs for 10.3 * l0-setup upgrade: Breadcrumb message when current version is not SemVer compliant (#310) If the current version of a user's layer0 (as defined in ~/.layer0//main.tf.json) is not semver compliant, this change will notify the user that they can use the --force flag to override the message. * Preserving previous error message, tidying up formatting and line length * 'Tester' iface; prototyping env stress tests with benchmarking * 232: dep init cleanup, benchmark stress test on multiple dimensions - The vendor/ dir cleanup is due to running go-dep's `dep init` - Ignoring Gopkg* and _vendor* - Benchmark tests are put into its own _test.go file. 
The pattern is such that the number of environments, deploys and services are parameterized when invoking benchmarkStress() * 232: forcing constraint on github.com/cenkalti/backoff Unit tests were failing because of very old semver version of this package Forced it to constrain to the master branch of the project * 232: dep manifest and lock should probably be included * 232: Stress tests split into tf modules, folding some changes from 232-tlake I created three tf modules, environment, service and deploy. The outputs of environment and deploy are picked up by service. Services are distributed using element() amongst available environments and deploys. * 232: Removing defer Terraform.Destroy() and instead explicitly calling Destroy() at end of for loop * 232: Cleaning up modules, parameterizing Dockerrun.aws.json command Benchmark tests are no longer in table testing pattern Dockerrun.aws.json now correctly interpolates the command passed to it by the test Directory cleanup and organizing of the tf modules * 232: Minor formatting change * 232: Modifying the benchmark tests to be more modest in scope * Add service limits warning * Update for 'make test' * Add ListTasks and ListJobs to benchmarks * 232: Adding Load Balancer parameterization in benchmark stress tests The benchmark tests now allow creating an arbitrary number of load balancers in a given test Also made some adjustments to the dimensions of the benchmark tests themselves * 232: More benchmark stress test cases * 232: Added a more 'realistic' benchmark test, tested with changes from 232-tlake * 232: More benchmark tests, terraform fmt adjustments * 232: Adding comment in README recommending the usage of flow. Makefile timeout set to 2h. * 232: Actually modifying the README * 232: 232-stress-tests will not include dep changes, that will be handled by another named branch * 232: syncing vendor dir state from develop into 232-stress-tests * 232: Adding specifics of limits in tests/README.md, cutting down the number of benchmark tests * Update to align with changes in `tftest` package (#332) * Update to work with changes in tftest * Remove earlier iteration stress test file * Use 'testing.B' properly to avoid panic * split out 'test' and 'benchmark' targets * Strip out 'StressTest' struct * Strip out 'StressTest' struct; Simplify function names and arguments * Add compatibility with 'tftest.Logger' * Update to use new 'StandardLogger.Logger' * Add benchmark target to top-level Makefile * Base of refactoring stress tests to use one terraform config * DRY out benchmarking code * Use a single Terraform config for stress tests * Test entity Get functions in addition to List * Refactor 'random_shuffle' resources out in favor of 'element()' with inherent modulus * Transfer test cases into updated pattern * Remove outdated multi-file terraform configuration * Remove parametrization of deploy command * Move 'Tester' iface into 'testutils' package * Remove 'deploy_command' var * Refactor task stress test into benchmark pattern * Add top-of-test checks for entity dependencies * Strip out go func args * Don't wait for tasks to finish * Strip out overcomplicated Task creation logic * Overhaul test cases to use min/med/max params * Vendoring update * Reduce test matrix for Families+Deploys * Added terraform init to l0-setup init * Delete terraform * use cluster names in ListEnvironments * enable ecs metrics via IAM policy * [v0.10.4] scaler patch (#472) * Remove instance.AgentConnected check when gathering resource providers. 
* Update test to reflect that agent status is not taken into account * Log a disconnected agent as info, but do nothing about it * update terraform module to use region, ami data sources (#471) * enables terraform AMI, region lookup * apply terraform fmt * [v0.10.4] 466: Implement retry and delay logic against AWS API (#468) * 466: Implement retry and delay logic against AWS API Recognizes the environment variables LAYER0_AWS_TIME_BETWEEN_REQUESTS and LAYER0_AWS_MAX_RETRIES. LAYER0_AWS_TIME_BETWEEN_REQUESTS is a time duration for the min amount of time allowed between AWS requests LAYER0_AWS_MAX_RETRIES is a numerical value representing the max number of times a request will be retried for failures * MAINT: Review fixes * MAINT: Undo changes to dynamo db tests * MAINT: Make common aws config helper function * 466: Make new helper function to handle session push back * 466: Change session delay helper function name * [v0.10.4] 475: Remove AWS Region default and require region for API operation (#476) * 475: Remove AWS Region default and require region for API operation * 475: Inclue region as l0-setup endpoint -d output The AWS Region specified when creating a layer0 instance is now output when running the command l0-setup endpoint -d. Fixes an AMI ID for the API module in the us-east-1 region * 475: Use develop version of setup/module/api/variables.tf * 475: Use develop version of setup/module/api/variables.tf * MAINT: Remove default AWS region from config * [v0.10.4] Decrease ECS API Calls (#474) * use cluster names in ListEnvironments * add task environment id resolving * fix get task * fix delete and create task * update task logs * update list services api call * update get environment * chagne scheduler to run once an hour * Fix api calls on service list for scaling * use DescribeEnvironmentTasks for scaler * remove copies from task create * re-add copies param to task create in cli * use 'layer0_version' instead of version for tf variable * Update README.md * MAINT: Make l0 deploy list only return active task definitions (#480) The methods called in the AWS SDK that pull back Task Definitions should only be retrieving Task Definitions that are active. Without filtering Task Definitions by status, inactive Task Definitions can still be retrieved. For Layer0 instances that have been up for a long time, inactive Task Definitions can accumulate and has been known to slow the operation l0 deploy list down considerably. * [v0.10.4] Fix System and Stress tests (#482) * update tftest * add init targets to system and stress test makefiles: * fixup stress tests * fix stress test target * add debug flag * 444: Fix l0-setup apply issue for instances created in another region When a new AWS Provider needs to be created, aws.Config needs to contain the correct region for interactions with AWS to work. In the case where l0 instances are initialized and set up in a region other than the default us-west-2, the resources will create in the region specified in the init wizard, but pushing terraform state to the S3 bucket fails because it was always assumed that the AWS region would be us-west-2. To fix this, anytime an AWS Provider is needed, the instance derives what region it's in (assuming that l0-setup apply ran to completion), and then passes that region into the AWS Provider helper function so that aws.Config sets the correct region for API operations via the WithRegion() method. 
* 444: Remove aws-region flag from l0-setup list command * MAINT: Make clarifying comments on l0-setup commands * 444: Modify l0-setup pull to calculate region instance's bucket is in To solve the problem where a user might need to pull a remote layer0 instance but doesn't have the instance locally, we need to calculate the region the bucket is in before we actually pull from the bucket. Two helper functions are created to help with this: one to retrieve a list of S3 buckets then match the associated bucket with the one provided by the user as the instance name, and one to determine the matched bucket's region. * MAINT: Add clarifying comments on default AWS region usage * MAINT: Change setup/instance package references in l0-setup commands * update windows ecs agent version, remove bats admin test (#483) * Release candidate doc fixes (#485) * MAINT: Make doc changes for Introduction and Install * MAINT: Clean up cli reference and upgrade instructions * MAINT: Build mkdown docs * MAINT: Remove center tag from download table on home page * MAINT: Clean up the CLI reference page * Update docs (#484) * Update deploy documents to add terraform init * Update docs to fix issue #361 and make error message clearer * MAINT: Clean up the CLI reference some more * MAINT: Clean up the l0-setup CLI reference * MAINT: Make minor usage changes in l0-setup cli reference * MAINT: Remove terraform beyond layer0 guide, clean up broken links, formatting cleanup * MAINT: Remove walkthrough deployment 3, guestbook with consul * MAINT: Fix spacing issue on setup cli reference * fix flow version target * MAINT: Make code review changes * MAINT: Make formatting fixes * MAINT: Make more code review changes. Fix spacing issues * MAINT: Code review changes * MAINT: Fix error message text in aws provider helper function * MAINT: More formatting cleanup * MAINT: Change mkdocs edit_uri to fix the edit icon on each doc page * Docs style update (#486) * Fix style bugs * Fix spacing issues * Udpate makefile to specify version * Remove commentted code * Fix merge conflicts * Fix issues when adjusting header logo * MAINT: Change casing on site terraform.io references * MAINT: Post make build commit * MAINT: Formatting changes on terraform plugin reference page. a href color change for admonition * MAINT: Formatting changes on terraform plugin reference page. 
a href color change for admonition * Remove leading '$' in code block to improve copypaste (#487) * Release doc updates for v0.10.4 * Update release notes --- .DS_Store | Bin 0 -> 6148 bytes Makefile | 4 +- README.md | 2 +- RELEASE_NOTES.md | 17 + api/backend/ecs/environment_manager.go | 2 +- common/aws/ecs/ecs.go | 4 + docs-src/Makefile | 1 + docs-src/docs/CNAME | 1 + .../docs/guides/terraform_beyond_layer0.md | 510 -- .../docs/guides/walkthrough/deployment-1.md | 44 +- .../docs/guides/walkthrough/deployment-2.md | 48 +- .../docs/guides/walkthrough/deployment-3.md | 423 -- docs-src/docs/index.md | 14 +- docs-src/docs/intro.md | 2 +- docs-src/docs/reference/cli.md | 1509 ++--- docs-src/docs/reference/consul.md | 2 - docs-src/docs/reference/setup-cli.md | 164 +- docs-src/docs/reference/task_definition.md | 2 +- docs-src/docs/reference/terraform-plugin.md | 75 +- docs-src/docs/reference/updateservice.md | 85 +- docs-src/docs/releases.md | 1 + docs-src/docs/setup/destroy.md | 11 +- docs-src/docs/setup/install.md | 85 +- docs-src/docs/setup/upgrade.md | 15 +- docs-src/docs/stylesheets/extra.css | 81 +- docs-src/docs/troubleshooting/commonissues.md | 39 +- docs-src/docs/troubleshooting/ssh.md | 2 +- docs-src/material/404.html | 4 + docs-src/material/assets/images/favicon.png | Bin 0 -> 521 bytes .../images/icons/bitbucket.4ebea66e.svg | 1 + .../assets/images/icons/github.a4034fb1.svg | 1 + .../assets/images/icons/gitlab.d80e5efc.svg | 1 + .../javascripts/application.cae2244d.js | 1 + .../assets/javascripts/lunr/lunr.da.js | 1 + .../assets/javascripts/lunr/lunr.de.js | 1 + .../assets/javascripts/lunr/lunr.du.js | 1 + .../assets/javascripts/lunr/lunr.es.js | 1 + .../assets/javascripts/lunr/lunr.fi.js | 1 + .../assets/javascripts/lunr/lunr.fr.js | 1 + .../assets/javascripts/lunr/lunr.hu.js | 1 + .../assets/javascripts/lunr/lunr.it.js | 1 + .../assets/javascripts/lunr/lunr.jp.js | 1 + .../assets/javascripts/lunr/lunr.multi.js | 1 + .../assets/javascripts/lunr/lunr.no.js | 1 + .../assets/javascripts/lunr/lunr.pt.js | 1 + .../assets/javascripts/lunr/lunr.ro.js | 1 + .../assets/javascripts/lunr/lunr.ru.js | 1 + .../javascripts/lunr/lunr.stemmer.support.js | 1 + .../assets/javascripts/lunr/lunr.sv.js | 1 + .../assets/javascripts/lunr/lunr.tr.js | 1 + .../assets/javascripts/lunr/tinyseg.js | 1 + .../assets/javascripts/modernizr.1aa3b519.js | 1 + .../application-palette.792431c1.css | 2 + .../stylesheets/application.0e9c8aca.css | 2 + docs-src/material/base.html | 342 +- docs-src/material/main.html | 1 + docs-src/material/mkdocs_theme.yml | 70 + docs-src/material/partials/footer.html | 58 + docs-src/material/partials/header.html | 49 + docs-src/material/partials/hero.html | 10 + .../partials/integrations/analytics.html | 1 + .../partials/integrations/disqus.html | 14 + docs-src/material/partials/language.html | 9 + docs-src/material/partials/language/da.html | 18 + docs-src/material/partials/language/de.html | 18 + docs-src/material/partials/language/en.html | 18 + docs-src/material/partials/language/es.html | 18 + docs-src/material/partials/language/fr.html | 18 + docs-src/material/partials/language/it.html | 18 + docs-src/material/partials/language/ja.html | 18 + docs-src/material/partials/language/kr.html | 18 + docs-src/material/partials/language/nl.html | 18 + docs-src/material/partials/language/no.html | 18 + docs-src/material/partials/language/pl.html | 1 + docs-src/material/partials/language/pt.html | 18 + docs-src/material/partials/language/ru.html | 18 + 
docs-src/material/partials/language/sv.html | 18 + docs-src/material/partials/language/tr.html | 18 + .../material/partials/language/zh-Hant.html | 18 + docs-src/material/partials/language/zh.html | 18 + docs-src/material/partials/nav-item.html | 54 + docs-src/material/partials/nav.html | 24 + docs-src/material/partials/search.html | 21 + docs-src/material/partials/social.html | 9 + docs-src/material/partials/source.html | 25 + docs-src/material/partials/tabs-item.html | 31 + docs-src/material/partials/tabs.html | 13 + docs-src/material/partials/toc-item.html | 14 + docs-src/material/partials/toc.html | 29 + docs-src/mkdocs.yml | 16 +- docs/404.html | 612 ++ docs/CNAME | 2 +- docs/assets/fonts/icon.eot | Bin docs/assets/fonts/icon.svg | 0 docs/assets/fonts/icon.ttf | Bin docs/assets/fonts/icon.woff | Bin docs/assets/images/favicon.png | Bin 0 -> 521 bytes .../images/icons/bitbucket.4ebea66e.svg | 1 + docs/assets/images/icons/github.a4034fb1.svg | 1 + docs/assets/images/icons/gitlab.d80e5efc.svg | 1 + .../javascripts/application.cae2244d.js | 1 + docs/assets/javascripts/lunr/lunr.da.js | 1 + docs/assets/javascripts/lunr/lunr.de.js | 1 + docs/assets/javascripts/lunr/lunr.du.js | 1 + docs/assets/javascripts/lunr/lunr.es.js | 1 + docs/assets/javascripts/lunr/lunr.fi.js | 1 + docs/assets/javascripts/lunr/lunr.fr.js | 1 + docs/assets/javascripts/lunr/lunr.hu.js | 1 + docs/assets/javascripts/lunr/lunr.it.js | 1 + docs/assets/javascripts/lunr/lunr.jp.js | 1 + docs/assets/javascripts/lunr/lunr.multi.js | 1 + docs/assets/javascripts/lunr/lunr.no.js | 1 + docs/assets/javascripts/lunr/lunr.pt.js | 1 + docs/assets/javascripts/lunr/lunr.ro.js | 1 + docs/assets/javascripts/lunr/lunr.ru.js | 1 + .../javascripts/lunr/lunr.stemmer.support.js | 1 + docs/assets/javascripts/lunr/lunr.sv.js | 1 + docs/assets/javascripts/lunr/lunr.tr.js | 1 + docs/assets/javascripts/lunr/tinyseg.js | 1 + docs/assets/javascripts/modernizr.1aa3b519.js | 1 + .../application-palette.792431c1.css | 2 + .../stylesheets/application.0e9c8aca.css | 2 + docs/guides/one_off_task/index.html | 987 ++-- .../guides/terraform_beyond_layer0/index.html | 985 ---- .../walkthrough/deployment-1/index.html | 1296 +++-- .../walkthrough/deployment-2/index.html | 1294 +++-- .../walkthrough/deployment-3/index.html | 885 --- docs/guides/walkthrough/intro/index.html | 837 +-- docs/index.html | 913 +-- docs/intro/index.html | 970 ++-- docs/mkdocs/js/lunr.min.js | 7 - docs/mkdocs/js/mustache.min.js | 1 - docs/mkdocs/js/require.js | 36 - .../js/search-results-template.mustache | 4 - docs/mkdocs/js/search.js | 88 - docs/mkdocs/js/text.js | 390 -- docs/mkdocs/search_index.json | 1904 ------- docs/reference/architecture/index.html | 839 +-- docs/reference/cli/index.html | 4967 ++++++++++++----- docs/reference/consul/index.html | 1018 ++-- docs/reference/ecr/index.html | 1061 ++-- docs/reference/setup-cli/index.html | 1784 ++++-- docs/reference/task_definition/index.html | 887 +-- docs/reference/terraform-plugin/index.html | 1910 +++++-- .../terraform_introduction/index.html | 940 ++-- docs/reference/updateservice/index.html | 1005 ++-- docs/releases/index.html | 828 +-- docs/search/search_index.json | 1724 ++++++ docs/setup/destroy/index.html | 921 +-- docs/setup/install/index.html | 1085 ++-- docs/setup/upgrade/index.html | 868 +-- docs/sitemap.xml | 54 +- docs/stylesheets/extra.css | 81 +- docs/troubleshooting/commonissues/index.html | 977 ++-- docs/troubleshooting/ssh/index.html | 822 +-- scripts/flow.sh | 4 +- setup/command/apply.go | 14 +- 
setup/command/aws.go | 22 +- setup/command/list.go | 7 +- setup/command/pull.go | 60 +- setup/command/push.go | 12 +- tests/README.md | 5 + tests/clients/layer0_test_client.go | 68 +- tests/smoke/README.md | 6 +- tests/smoke/admin.bats | 4 - tests/stress/Makefile | 13 +- tests/stress/main_test.go | 3 - tests/stress/stress_test.go | 1 - tests/system/Makefile | 18 +- tests/system/cases/datasources/outputs.tf | 2 +- tests/system/system_test.go | 8 +- .../github.com/quintilesims/tftest/context.go | 25 +- .../github.com/quintilesims/tftest/logger.go | 27 - .../github.com/quintilesims/tftest/options.go | 6 - .../quintilesims/tftest/test_context.go | 6 + vendor/vendor.json | 6 +- 176 files changed, 21842 insertions(+), 15737 deletions(-) create mode 100644 .DS_Store create mode 100644 docs-src/docs/CNAME delete mode 100644 docs-src/docs/guides/terraform_beyond_layer0.md delete mode 100644 docs-src/docs/guides/walkthrough/deployment-3.md create mode 100644 docs-src/material/404.html create mode 100644 docs-src/material/assets/images/favicon.png create mode 100644 docs-src/material/assets/images/icons/bitbucket.4ebea66e.svg create mode 100644 docs-src/material/assets/images/icons/github.a4034fb1.svg create mode 100644 docs-src/material/assets/images/icons/gitlab.d80e5efc.svg create mode 100644 docs-src/material/assets/javascripts/application.cae2244d.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.da.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.de.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.du.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.es.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.fi.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.fr.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.hu.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.it.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.jp.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.multi.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.no.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.pt.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.ro.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.ru.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.stemmer.support.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.sv.js create mode 100644 docs-src/material/assets/javascripts/lunr/lunr.tr.js create mode 100644 docs-src/material/assets/javascripts/lunr/tinyseg.js create mode 100644 docs-src/material/assets/javascripts/modernizr.1aa3b519.js create mode 100644 docs-src/material/assets/stylesheets/application-palette.792431c1.css create mode 100644 docs-src/material/assets/stylesheets/application.0e9c8aca.css create mode 100644 docs-src/material/main.html create mode 100644 docs-src/material/mkdocs_theme.yml create mode 100644 docs-src/material/partials/footer.html create mode 100644 docs-src/material/partials/header.html create mode 100644 docs-src/material/partials/hero.html create mode 100644 docs-src/material/partials/integrations/analytics.html create mode 100644 docs-src/material/partials/integrations/disqus.html create mode 100644 docs-src/material/partials/language.html create mode 100644 docs-src/material/partials/language/da.html create mode 100644 docs-src/material/partials/language/de.html create mode 100644 
docs-src/material/partials/language/en.html create mode 100644 docs-src/material/partials/language/es.html create mode 100644 docs-src/material/partials/language/fr.html create mode 100644 docs-src/material/partials/language/it.html create mode 100644 docs-src/material/partials/language/ja.html create mode 100644 docs-src/material/partials/language/kr.html create mode 100644 docs-src/material/partials/language/nl.html create mode 100644 docs-src/material/partials/language/no.html create mode 100644 docs-src/material/partials/language/pl.html create mode 100644 docs-src/material/partials/language/pt.html create mode 100644 docs-src/material/partials/language/ru.html create mode 100644 docs-src/material/partials/language/sv.html create mode 100644 docs-src/material/partials/language/tr.html create mode 100644 docs-src/material/partials/language/zh-Hant.html create mode 100644 docs-src/material/partials/language/zh.html create mode 100644 docs-src/material/partials/nav-item.html create mode 100644 docs-src/material/partials/nav.html create mode 100644 docs-src/material/partials/search.html create mode 100644 docs-src/material/partials/social.html create mode 100644 docs-src/material/partials/source.html create mode 100644 docs-src/material/partials/tabs-item.html create mode 100644 docs-src/material/partials/tabs.html create mode 100644 docs-src/material/partials/toc-item.html create mode 100644 docs-src/material/partials/toc.html create mode 100644 docs/404.html mode change 100755 => 100644 docs/assets/fonts/icon.eot mode change 100755 => 100644 docs/assets/fonts/icon.svg mode change 100755 => 100644 docs/assets/fonts/icon.ttf mode change 100755 => 100644 docs/assets/fonts/icon.woff create mode 100644 docs/assets/images/favicon.png create mode 100644 docs/assets/images/icons/bitbucket.4ebea66e.svg create mode 100644 docs/assets/images/icons/github.a4034fb1.svg create mode 100644 docs/assets/images/icons/gitlab.d80e5efc.svg create mode 100644 docs/assets/javascripts/application.cae2244d.js create mode 100644 docs/assets/javascripts/lunr/lunr.da.js create mode 100644 docs/assets/javascripts/lunr/lunr.de.js create mode 100644 docs/assets/javascripts/lunr/lunr.du.js create mode 100644 docs/assets/javascripts/lunr/lunr.es.js create mode 100644 docs/assets/javascripts/lunr/lunr.fi.js create mode 100644 docs/assets/javascripts/lunr/lunr.fr.js create mode 100644 docs/assets/javascripts/lunr/lunr.hu.js create mode 100644 docs/assets/javascripts/lunr/lunr.it.js create mode 100644 docs/assets/javascripts/lunr/lunr.jp.js create mode 100644 docs/assets/javascripts/lunr/lunr.multi.js create mode 100644 docs/assets/javascripts/lunr/lunr.no.js create mode 100644 docs/assets/javascripts/lunr/lunr.pt.js create mode 100644 docs/assets/javascripts/lunr/lunr.ro.js create mode 100644 docs/assets/javascripts/lunr/lunr.ru.js create mode 100644 docs/assets/javascripts/lunr/lunr.stemmer.support.js create mode 100644 docs/assets/javascripts/lunr/lunr.sv.js create mode 100644 docs/assets/javascripts/lunr/lunr.tr.js create mode 100644 docs/assets/javascripts/lunr/tinyseg.js create mode 100644 docs/assets/javascripts/modernizr.1aa3b519.js create mode 100644 docs/assets/stylesheets/application-palette.792431c1.css create mode 100644 docs/assets/stylesheets/application.0e9c8aca.css delete mode 100644 docs/guides/terraform_beyond_layer0/index.html delete mode 100644 docs/guides/walkthrough/deployment-3/index.html delete mode 100644 docs/mkdocs/js/lunr.min.js delete mode 100644 docs/mkdocs/js/mustache.min.js delete mode 
100644 docs/mkdocs/js/require.js delete mode 100644 docs/mkdocs/js/search-results-template.mustache delete mode 100644 docs/mkdocs/js/search.js delete mode 100644 docs/mkdocs/js/text.js delete mode 100644 docs/mkdocs/search_index.json create mode 100644 docs/search/search_index.json delete mode 100644 vendor/github.com/quintilesims/tftest/logger.go diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c235a76056e6d73d3c2f39ec509caf9f303e4286 GIT binary patch literal 6148 [binary data omitted] /go/src/github.com/quintilesims/guides/terraform-beyond-layer0/example-1/modules/guestbook_service - -Before deploying, we can run the following command to see what changes Terraform will make to your infrastructure should you go ahead and apply. If you had any errors in your layer0.tf file, running `terraform plan` would output those errors so that you can address them. Also, Terraform will prompt you for configuration values that it does not have. - -!!! Tip - There are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the `terraform.tfvars` file, or exporting environment variables like `TF_VAR_endpoint` and `TF_VAR_token`, for example). See the [Terraform Docs](https://www.terraform.io/docs/configuration/variables.html) for more. - -`terraform plan` - -``` -var.endpoint - Enter a value: - -var.token - Enter a value: -... -+ aws_dynamodb_table.guestbook - arn: "" - attribute.#: "1" - attribute.4228504427.name: "id" - attribute.4228504427.type: "S" - hash_key: "id" - name: "guestbook" - read_capacity: "20" - stream_arn: "" - stream_enabled: "" - stream_view_type: "" - write_capacity: "20" - -... -``` - -## Part 3: Terraform Apply - -Run the following command to begin the deploy process. - -`terraform apply` - -``` -layer0_environment.demo: Refreshing state... -... -... -... -layer0_service.guestbook: Creation complete - -Apply complete! Resources: 7 added, 0 changed, 0 destroyed. - -The state of your infrastructure has been saved to the path -below. This state is required to modify and destroy your -infrastructure, so keep it safe. To inspect the complete state -use the `terraform show` command. - -State path: terraform.tfstate - -Outputs: - -guestbook_url = -``` - -!!! Note - It may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time, you may get HTTP 503 errors when making HTTP requests against the load balancer URL. - - -Terraform will set up the entire environment for you and then output a link to the application's load balancer. - -### What's happening - -Terraform, using the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html), provisions a new DynamoDB table. It also uses the [Layer0 provider](http://layer0.ims.io/reference/terraform-plugin/#provider) to provision the environment, deploy, load balancer and service required to run the entire guestbook application.
 - -Looking at an excerpt of the file [./terraform-beyond-layer0/example-1/modules/guestbook_service/main.tf](https://github.com/quintilesims/guides/blob/master/terraform-beyond-layer0/example-1/modules/guestbook_service/main.tf), we can see the following definitions: - -``` -resource "aws_dynamodb_table" "guestbook" { - name = "${var.table_name}" - read_capacity = 20 - write_capacity = 20 - hash_key = "id" - - attribute { - name = "id" - type = "S" - } -} - -resource "layer0_deploy" "guestbook" { - name = "guestbook" - content = "${data.template_file.guestbook.rendered}" -} - -data "template_file" "guestbook" { - template = "${file("Dockerrun.aws.json")}" - - vars { - access_key = "${var.access_key}" - secret_key = "${var.secret_key}" - region = "${var.region}" - table_name = "${aws_dynamodb_table.guestbook.name}" - } -} -``` - -Note the resource definitions for `aws_dynamodb_table` and `layer0_deploy`. To configure the guestbook application to use the provisioned DynamoDB table, we reference the `name` property from the DynamoDB definition: `table_name = "${aws_dynamodb_table.guestbook.name}"`. - -These `vars` are used to populate the template fields in our [Dockerrun.aws.json](https://github.com/quintilesims/guides/blob/master/terraform-beyond-layer0/example-1/modules/guestbook_service/Dockerrun.aws.json) file. - -``` -{ - "AWSEBDockerrunVersion": 2, - "containerDefinitions": [ - { - "name": "guestbook", - "image": "quintilesims/guestbook-db", - "essential": true, - "memory": 128, - "environment": [ - { - "name": "DYNAMO_TABLE", - "value": "${table_name}" - } - ... -``` - -By referencing the AWS DynamoDB configuration (`table_name = "${aws_dynamodb_table.guestbook.name}"`), the Layer0 configuration creates an implicit dependency. Before Terraform creates the infrastructure, it will use this information to order resource creation, creating resources in parallel where there are no dependencies. In this example, the AWS DynamoDB table will be created before the Layer0 deploy. See [Terraform Resource Dependencies](https://www.terraform.io/intro/getting-started/dependencies.html) for more information. - -## Part 4: Scaling a Layer0 Service - -The workflow to make changes to your infrastructure generally involves updating your Terraform configuration file, followed by a `terraform plan` and `terraform apply`. - -### Update the Terraform configuration -Open the file `./example-1/modules/guestbook_service/main.tf` in a text editor and add a `scale` property with a value of `3` to the `layer0_service` section. For more information about the `scale` property, see the [Layer0 Terraform Plugin](http://layer0.ims.io/reference/terraform-plugin/#service) documentation. The result should look like the following: - -example-1/modules/guestbook_service/main.tf - -``` -# Create a service named "guestbook" -resource "layer0_service" "guestbook" { - name = "guestbook" - environment = "${layer0_environment.demo.id}" - deploy = "${layer0_deploy.guestbook.id}" - load_balancer = "${layer0_load_balancer.guestbook.id}" - scale = 3 -} - -``` - -### Plan and Apply - -Execute the `terraform plan` command to understand the changes that you will be making. Note that if you do not specify `scale`, it defaults to `1`. - -`terraform plan` - -Outputs: - -``` -... - -~ module.guestbook.layer0_service.guestbook - scale: "1" => "3" -``` - -Now run the following command to deploy your changes: - -`terraform apply` - -Outputs: - -``` -layer0_environment.demo: Refreshing state...
(ID: demoenvbb9f6) -data.template_file.guestbook: Refreshing state... -layer0_deploy.guestbook: Refreshing state... (ID: guestbook.6) -layer0_load_balancer.guestbook: Refreshing state... (ID: guestbo43ab0) -layer0_service.guestbook: Refreshing state... (ID: guestboebca1) -layer0_service.guestbook: Modifying... (ID: guestboebca1) - scale: "1" => "3" -layer0_service.guestbook: Modifications complete (ID: guestboebca1) - -Apply complete! Resources: 0 added, 1 changed, 0 destroyed. - -The state of your infrastructure has been saved to the path -below. This state is required to modify and destroy your -infrastructure, so keep it safe. To inspect the complete state -use the `terraform show` command. - -State path: - -Outputs: - -services = -``` - -To confirm your service has been updated to the desired scale, you can run the following Layer0 command. Note that the desired scale for the guestbook service should eventually reach 3/3. - -`l0 service get guestbook1_guestbook_svc` -Outputs: - -``` -SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE -guestbo4fd3b guestbook1_guestbook_svc demo guestbook1_guestbook_lb guestbook1_guestbook_dpl:3* 1/3 (2) -``` - -Since scale is a parameter we are likely to change in the future, rather than hardcoding it to 3 as we have just done, it would be better to store it in a variable such as `service_scale`. The following Best Practices sections will show how you can achieve this. - -!!! Note "Best Practices with Terraform + Layer0" - The following sections outline some of the best practices and tips to take into consideration when using Layer0 with Terraform. - -## Part 5: Terraform Remote State - -Terraform stores the state of the deployed infrastructure in a local file named `terraform.tfstate` by default. To find out more about why Terraform needs to store state, see [Purpose of Terraform State](https://www.terraform.io/docs/state/purpose.html). - -How state is loaded and used for operations such as `terraform apply` is determined by a [Backend](https://www.terraform.io/docs/backends). As mentioned, by default the state is stored locally, which is enabled by the "local" backend. - -### Remote State - -By default, Terraform stores state locally, but it can also be configured to store state in a remote backend. This can prove useful when you are working as part of a team to provision and manage services deployed by Terraform. All the members of the team will need access to the state file to apply new changes and be able to do so without overwriting each other's changes. See here for more information on the different [backend types](https://www.terraform.io/docs/backends/types/index.html) supported by Terraform. - -To configure a remote backend, append the `terraform` section below to your terraform file `./example-1/main.tf`. Populate the `bucket` property with the name of an existing S3 bucket. - -!!! Tip - If you have been following along with the guide, `./example-1/main.tf` should already have the below section commented out. You can uncomment the `terraform` section and populate the bucket property with an appropriate value. - -``` -terraform { - backend "s3" { - bucket = "" - key = "demo-env/remote-backend/terraform.tfstate" - region = "us-west-2" - } -} -``` - -Once you have modified `main.tf`, you will need to initialize the newly configured backend by running the following command. - -`terraform init` - -Outputs: - -``` -Initializing the backend...
 - -Do you want to copy state from "local" to "s3"? - ... - Do you want to copy the state from "local" to "s3"? Enter "yes" to copy - and "no" to start with the existing state in "s3". - - Enter a value: - -``` - -Go ahead and enter: `yes`. - -``` - -Successfully configured the backend "s3"! Terraform will automatically -use this backend unless the backend configuration changes. - -Terraform has been successfully initialized! -... - -``` - -### What's happening - -As you are configuring a backend for the first time, Terraform will give you the option to migrate your state to the new backend. From now on, any further changes to your infrastructure made by Terraform will result in the remote state file being updated. For more information, see [Terraform backends](https://www.terraform.io/docs/backends/index.html). - -A new team member can use the `main.tf` from their own machine without obtaining a copy of the state file `terraform.tfstate`, as the configuration will retrieve the state file from the remote backend. - -### Locking - -Not all remote backends support locking (locking ensures only one person is able to change the state at a time). The `S3` backend we used earlier in the example supports locking, which is disabled by default. The `S3` backend uses a DynamoDB table to acquire a lock before making a change to the state file. To enable locking, you need to specify the `locking_table` property with the name of an existing DynamoDB table. The DynamoDB table also needs a primary key named `LockID` of type `String`. - -### Security - -A Terraform state file is written in plain text. This means that deploying resources that require sensitive data can result in that sensitive data being stored in the state file. To minimize exposure of sensitive data, you can enable [server side encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) of the state file by adding the `encrypt` property set to `true`. - -This ensures that the file is encrypted in S3. By using a remote backend, you also gain the added benefit that the state file is not persisted to disk locally, as it is only ever held in memory by Terraform. - -To secure the state file further, you can also enable access logging on the S3 bucket you are using for the remote backend, which can help track down invalid access should it occur. - -## Part 6: Terraform Configuration Structure - -While there are many different approaches to organizing your Terraform code, we suggest using the following file structure: - -``` -example1/ # contains overarching Terraform deployment, pulls in any modules that might exist - ─ main.tf - ─ variables.tf - ─ output.tf - + modules/ # if you can break up deployment into smaller modules, keep the modules in here - + guestbook_service/ # contains Terraform configuration for a module - ─ main.tf - ─ variables.tf - ─ output.tf - + service2/ # contains another module - + service3/ # contains another module -``` - -Here we are making use of Terraform [Modules](https://www.terraform.io/docs/modules/index.html). Modules in Terraform are self-contained packages of Terraform configurations that are managed as a group. Modules are used to create reusable components in Terraform as well as for basic code organization. In this example, we are using modules to separate each service and make it consumable as a module. - -If you wanted to add a new service, you can create a new service folder inside `./modules` and pull it into the root configuration, as sketched below.
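To make that structure concrete, here is a minimal sketch of how the root configuration can pull in the guestbook module and surface its output. The variable and output names shown mirror values used elsewhere in this guide, but they are illustrative and may differ slightly from the example repository:

```
# example1/main.tf (sketch)
module "guestbook" {
  # path to the module's directory
  source = "./modules/guestbook_service"

  # values forwarded to the module's variables.tf
  endpoint   = "${var.endpoint}"
  token      = "${var.token}"
  access_key = "${var.access_key}"
  secret_key = "${var.secret_key}"
  region     = "${var.region}"
  table_name = "guestbook"
}

# example1/output.tf (sketch)
output "guestbook_url" {
  value = "${module.guestbook.guestbook_url}"
}
```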
If you wanted to, you could even run multiple copies of the same service. See [Creating Modules](https://www.terraform.io/docs/modules/create.html) for more information. - -Also see the below repositories for ideas on different ways you can organize your Terraform configuration files for the needs of your specific project: - -* [Terraform Community Modules](https://github.com/terraform-community-modules) -* [Best Practices Ops](https://github.com/hashicorp/best-practices) - -## Part 7: State Environments - -Layer0 recommends that you typically create a single environment for each tier of your application, such as `dev`, `staging` and `production`. That recommendation still holds when using Terraform with Layer0. Using the Layer0 CLI, you can target a specific environment for most CLI commands. This enables you to service each tier relatively easily. In Terraform, there are a few approaches you can take to enable a similar workflow. - -### Single Terraform Configuration - -You can use a single Terraform configuration to create and maintain multiple environments by making use of the [Count](https://www.terraform.io/docs/configuration/resources.html#count) parameter inside a resource. Count enables you to create multiple copies of a given resource. - -For example: - -``` -variable "environments" { - type = "list" - - default = [ - "dev", - "staging", - "production" - ] -} - -resource "layer0_environment" "demo" { - count = "${length(var.environments)}" - - name = "${var.environments[count.index]}_demo" -} -``` - -Let's take a more in-depth look at how this works. Start by navigating to the `./terraform-beyond-layer0/example-2` folder and running the plan command. - -`terraform plan` - -Outputs: -``` -+ module.environment.aws_dynamodb_table.guestbook.0 - ... - name: "dev_guestbook" -... -+ module.environment.aws_dynamodb_table.guestbook.1 - .. - name: "staging_guestbook" -... -``` - -Note that you will see a copy of each resource for each environment specified in `./example-2/variables.tf`. Go ahead and run apply. - -`terraform apply` - -Outputs: -``` -Apply complete! Resources: 10 added, 0 changed, 0 destroyed. - -Outputs: - -guestbook_urls = - - -``` - -You have now created two separate environments using a single Terraform configuration: dev and staging. You can navigate to both of the output URLs and note that they are separate instances of the guestbook application, each backed by its own data store. - -A common use case for maintaining different environments is to configure each environment slightly differently. For example, you might want to scale your Layer0 service to 3 for staging and leave it at 1 for the dev environment. This can be done easily by using conditional logic to set the `scale` parameter in the Layer0 service configuration in `./example-2/main.tf`. Go ahead and open `main.tf` in a text editor. Navigate to the `layer0_service` `guestbook` section. Uncomment the scale parameter so that your configuration looks like the following:
 - -``` -resource "layer0_service" "guestbook" { - count = "${length(var.environments)}" - - name = "${element(layer0_environment.demo.*.name, count.index)}_guestbook_svc" - environment = "${element(layer0_environment.demo.*.id, count.index)}" - deploy = "${element(layer0_deploy.guestbook.*.id, count.index)}" - load_balancer = "${element(layer0_load_balancer.guestbook.*.id, count.index)}" - scale = "${lookup(var.service_scale, var.environments[count.index], "1")}" -} -``` - -The variable `service_scale` is already defined in `variables.tf`. If you now go ahead and run plan, you will see that the `guestbook` service for only the `staging` environment will be scaled up. - -`terraform plan` - -Outputs: - -``` -~ layer0_service.guestbook.1 - scale: "1" => "3" -``` - -A potential downside of this approach, however, is that all your environments are using the same state file. Sharing a state file breaks some of the resource encapsulation between environments. Should your state file ever become corrupt, it would affect your ability to service all the environments until you resolve the issue, potentially by rolling back to a previous copy of the state file. - -The next section will show you how you can separate your Terraform environment configuration such that each environment has its own state file. - -!!! Note - As previously mentioned, you will want to avoid hardcoding resource parameter configuration values as much as possible; the scale property of a Layer0 service is one example. This extends to other properties as well, such as the Docker image version: you should avoid using `latest` and specify an explicit version via a configurable variable when possible. - -### Multiple Terraform Configurations - -The previous example used a single set of Terraform configuration files to create and maintain multiple environments. This resulted in a single state file which held the state information for all the environments. To avoid all environments sharing a single state file, you can split your Terraform configuration so that you have a state file for each environment. - -Go ahead and navigate to the `./terraform-beyond-layer0/example-3` folder. Here we are using a folder to separate each environment, so `env-dev` and `env-staging` represent a `dev` and a `staging` environment. To work with either of the environments, you will need to navigate into the desired environment's folder and run Terraform commands. This ensures that each environment has its own state file. - -Open the `env-dev` folder inside a text editor. Note that `main.tf` doesn't contain any resource definitions. Instead, we only have one module definition which has various variables being passed in, which is also how we are passing in the `environment` variable. To create `dev` and `staging` environments for our guestbook application, go ahead and run the terraform plan and apply commands from the `env-dev` and `env-staging` folders. - -``` -# assuming you are in the terraform-beyond-layer0/example-3 folder -cd env-dev -terraform get -terraform plan -terraform apply - -cd ../env-staging -terraform get -terraform plan -terraform apply -``` - -You should now have two instances of the guestbook application running. Note that our guestbook service in the staging environment has been scaled to 3. We have done this by specifying a map variable `service_scale` in `./example-3/env-staging/variables.tf`, which can hold a different scale value for each environment, as sketched below.
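As a rough sketch of that pattern (the default values shown here are illustrative, not necessarily the ones used in the example repository), the map variable and its lookup might look like the following:

```
# variables.tf (sketch)
variable "service_scale" {
  type = "map"

  default = {
    dev     = 1
    staging = 3
  }
}

# inside the module's layer0_service resource (sketch); each environment
# folder passes its own name in via the "environment" variable
scale = "${lookup(var.service_scale, var.environment, "1")}"
```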
 - -## Part 8: Multiple Provider Instances - -You can define multiple instances of the same provider, each uniquely customized. For example, you can have `aws` providers that target multiple regions or different roles, or, in the case of the `layer0` provider, multiple Layer0 endpoints. - -For example: - -``` -# aws provider -provider "aws" { - alias = "east" - region = "us-east-1" - # ... -} - -# aws provider configured to a west region -provider "aws" { - alias = "west" - region = "us-west-1" - # ... -} -``` - -This allows you to reference aws providers configured for different regions. You can select which provider configuration a resource uses by referencing it with the naming scheme `TYPE.ALIAS` in the resource's `provider` argument, which in the above example results in `aws.east` and `aws.west`. See [Provider Configuration](https://www.terraform.io/docs/configuration/providers.html) for more information. - -``` -resource "aws_instance" "foo" { - provider = "aws.east" - # ... -} - -resource "aws_instance" "bar" { - provider = "aws.west" - # ... -} -``` - -## Part 9: Cleanup - -When you're finished with the examples in this guide, run the following destroy command in each of the following directories to destroy the Layer0 environment, application and the DynamoDB table. - -Directories: - - * /example-1 - * /example-2 - * /example-3/env-dev - * /example-3/env-staging - -`terraform destroy` - -!!! Tip "Remote Backend Resources" - If you created additional resources (an S3 bucket and a DynamoDB table) separately when configuring a [Remote Backend](#part-5-terraform-remote-state), do not forget to delete those if they are no longer needed. You should be able to look at your Terraform configuration file `layer0.tf` to determine the name of the bucket and table. - diff --git a/docs-src/docs/guides/walkthrough/deployment-1.md b/docs-src/docs/guides/walkthrough/deployment-1.md index af65e6207..799b6ed16 100644 --- a/docs-src/docs/guides/walkthrough/deployment-1.md +++ b/docs-src/docs/guides/walkthrough/deployment-1.md @@ -366,7 +366,47 @@ We don't need to do anything with it; we just want to make sure it's there. --- -### Part 2: Terraform Plan +### Part 2: Terraform Init + +This deployment has provider dependencies, so an init call must be made. +(Terraform v0.11 and later require `terraform init`.) +At the command prompt, execute the following command: + +`terraform init` + +We should see output like the following: + +``` +Initializing modules... +- module.guestbook + +Initializing provider plugins... +- Checking for available provider plugins on https://releases.hashicorp.com... +- Downloading plugin for provider "template" (1.0.0)... + +The following providers do not have any version constraints in configuration, +so the latest version was installed. + +To prevent automatic upgrades to new major versions that may contain breaking +changes, it is recommended to add version = "..." constraints to the +corresponding provider blocks in configuration, with the constraint strings +suggested below. + +* provider.template: version = "~> 1.0" + +Terraform has been successfully initialized! + +You may now begin working with Terraform. Try running "terraform plan" to see +any changes that are required for your infrastructure. All Terraform commands +should now work. + +If you ever set or change modules or backend configuration for Terraform, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary.
+``` +--- + +### Part 3: Terraform Plan Before we actually create/update/delete any resources, it's a good idea to find out what Terraform intends to do. @@ -446,7 +486,7 @@ Once we're satisfied that Terraform will do what we want it to do, we can move o --- -### Part 3: Terraform Apply +### Part 4: Terraform Apply Run `terraform apply` to begin the process. diff --git a/docs-src/docs/guides/walkthrough/deployment-2.md b/docs-src/docs/guides/walkthrough/deployment-2.md index 2e61683dc..8c08e0dc6 100644 --- a/docs-src/docs/guides/walkthrough/deployment-2.md +++ b/docs-src/docs/guides/walkthrough/deployment-2.md @@ -310,7 +310,51 @@ This will create a local `.terraform/` directory. --- -### Part 2: Terraform Plan + +### Part 2: Terraform Init + +This deployment has provider dependencies, so an init call must be made. +(Terraform v0.11 and later require `terraform init`.) +At the command prompt, execute the following command: + +`terraform init` + +We should see output like the following: + +``` +Initializing modules... +- module.redis + Getting source "github.com/quintilesims/redis//terraform" +- module.guestbook + Getting source "github.com/quintilesims/guides//guestbook/module" + +Initializing provider plugins... +- Checking for available provider plugins on https://releases.hashicorp.com... +- Downloading plugin for provider "template" (1.0.0)... + +The following providers do not have any version constraints in configuration, +so the latest version was installed. + +To prevent automatic upgrades to new major versions that may contain breaking +changes, it is recommended to add version = "..." constraints to the +corresponding provider blocks in configuration, with the constraint strings +suggested below. + +* provider.template: version = "~> 1.0" + +Terraform has been successfully initialized! + +You may now begin working with Terraform. Try running "terraform plan" to see +any changes that are required for your infrastructure. All Terraform commands +should now work. + +If you ever set or change modules or backend configuration for Terraform, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary. +``` +--- + +### Part 3: Terraform Plan It's always a good idea to find out what Terraform intends to do, so let's do that: @@ -404,7 +448,7 @@ We should see that Terraform intends to add 7 new resources, some of which are f --- -### Part 2: Terraform Apply +### Part 4: Terraform Apply Run `terraform apply`, and we should see output similar to the following: diff --git a/docs-src/docs/guides/walkthrough/deployment-3.md b/docs-src/docs/guides/walkthrough/deployment-3.md deleted file mode 100644 index 5794481c0..000000000 --- a/docs-src/docs/guides/walkthrough/deployment-3.md +++ /dev/null @@ -1,423 +0,0 @@ -# Deployment 3: Guestbook + Redis + Consul - -In [Deployment 2](deployment-2#deploy-with-layer0-cli), we created two services in the same environment and linked them together manually. -While that can work for a small system, it's not really feasible for a system with a lot of moving parts - we would need to look up load balancer endpoints for all of our services and manually link them all together. -To that end, here we're going to redeploy our two-service system using [Consul](https://www.consul.io), a service discovery tool. - -For this deployment, we'll create a cluster of Consul servers which will be responsible for keeping track of the state of our system.
-We'll also deploy new versions of the Guestbook and Redis task definition files - in addition to creating a container for its respective application, each task definition creates two other containers: - - - a container for a Consul agent, which is in charge of communicating with the Consul server cluster - - a container for [Registrator](https://github.com/gliderlabs/registrator), which is in charge of talking to the local Consul agent when a service comes up or goes down. - -You can choose to complete this section using either the [Layer0 CLI](#deploy-with-layer0-cli) or [Terraform](#deploy-with-terraform). - - -## Deploy with Layer0 CLI - -If you're following along, you'll want to be working in the `walkthrough/deployment-3/` directory of your clone of the [guides](https://github.com/quintilesims/guides) repo. - -Files used in this deployment: - -| Filename | Purpose | -|----------|---------| -| `CLI.Consul.Dockerrun.aws.json` | Template for running a Consul server | -| `CLI.Guestbook.Dockerrun.aws.json` | Template for running the Guestbook application with Registrator and Consul agent | -| `CLI.Redis.Dockerrun.aws.json` | Template for running a Redis server with Registrator and Consul agent | - - ---- - -### Part 1: Create the Consul Load Balancer - -The Consul server cluster will live in the same environment as our Guestbook and Redis services - if you've completed [Deployment 1](deployment-1) and [Deployment 2](deployment-2), this environment already exists as **demo-env**. -We'll start by creating a load balancer for the Consul cluster. -The load balancer will be private since only Layer0 services need to communicate with the Consul cluster. -At the command prompt, execute the following: - -`l0 loadbalancer create --port 8500:8500/tcp --port 8301:8301/tcp --private --healthcheck-target tcp:8500 demo-env consul-lb` - -We should see output like the following: - -``` -LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL -consull66b23 consul-lb demo-env 8500:8500/TCP false - 8301:8301/TCP -``` - -The following is a summary of the arguments passed in the above command: - -- `loadbalancer create`: creates a new load balancer -- `--port 8500:8500/tcp`: instructs the load balancer to forward requests from port 8500 on the load balancer to port 8500 in the EC2 instance using the TCP protocol -- `--port 8301:8301/tcp`: instructs the load balancer to forward requests from port 8301 on the load balancer to port 8301 in the EC2 instance using the TCP protocol -- `--private`: instructs the load balancer to ignore outside traffic -- `--healthcheck-target`: instructs the load balancer to use a TCP ping on port 8500 as the basis for deciding whether the service is healthy -- `demo-env`: the name of the environment in which the load balancer is being created -- `consul-lb`: a name for the load balancer itself - -While we're touching on the Consul load balancer, we should grab its URL - this is the one value that we'll need to know in order to deploy the rest of our system, no matter how large it may get. -At the command prompt, execute the following: - -`l0 loadbalancer get consul-lb` - -We should see output that looks like the output we just received above after creating the load balancer, but this time there is something in the **URL** column. -That URL is the value we're looking for. -Make note of it for when we reference it later.
- - ---- - -### Part 2: Deploy the Consul Task Definition - -Before we can create the deploy, we need to supply the URL of the Consul load balancer that we got in Part 1. -In `CLI.Consul.Dockerrun.aws.json`, find the entry in the `environment` block that looks like this: - -``` -{ - "name": "CONSUL_SERVER_URL", - "value": "" -} -``` - -Update the "value" with the Consul load balancer's URL into and save the file. -We can then create the deploy. -At the command prompt, execute the following: - -`l0 deploy create CLI.Consul.Dockerrun.aws.json consul-dpl` - -We should see output like the following: - -``` -DEPLOY ID DEPLOY NAME VERSION -consul-dpl.1 consul-dpl 1 -``` - -The following is a summary of the arguments passed in the above command: - -- `deploy create`: creates a new Layer0 Deploy and allows you to specifiy an ECS task definition -- `CLI.Consul.Dockerrun.aws.json`: the file name of the ECS task definition (use the full path of the file if it is not in the current working directory) -- `consul-dpl`: a name for the deploy, which will later be used in creating the service - - ---- - -### Part 3: Create the Consul Service - -Here, we pull the previous resources together to create a service. -At the command prompt, execute the following: - -`l0 service create --wait --loadbalancer demo-env:consul-lb demo-env consul-svc consul-dpl:latest` - -We should see output like the following: - -``` -Waiting for Deployment... -SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE -consuls2f3c6 consul-svc demo-env consul-lb consul-dpl:1 1/1 -``` - -The following is a summary of the arguments passed in the above commands: - -- `service create`: creates a new Layer0 Service -- `--wait`: instructs the CLI to keep hold of the shell until the service has been successfully deployed -- `--loadbalancer demo-env:consul-lb`: the fully-qualified name of the load balancer behind which the service should live; in this case, the load balancer named **consul-lb** in the environment named **demo-env** -- `demo-env`: the name of the environment in which the service is to reside -- `consul-svc`: a name for the service itself -- `consul-dpl:latest`: the name and version of the deploy that the service should put into action - -Once the service has finished being deployed (and `--wait` has returned our shell to us), we need to scale the service. - -Currently, we only have one Consul server running in the cluster. -For best use, we should have at least 3 servers running (see [this link](https://www.consul.io/docs/internals/consensus.html) for more details on Consul servers and their concensus protocol). -Indeed, if we inspect the `command` block of the task definition file, we can find the following parameter: `-bootstrap-expect=3`. -This tells the Consul server that we have just deployed that it should be expecting a total of three servers. -We still need to fulfill that expectation, so we'll scale our service up to three. -At the command prompt, execute the following: - -`l0 service scale --wait consul-svc 3` - -We should see output like the following: - -``` -Waiting for Deployment... -SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE -consuls2f3c6 consul-svc demo-env consul-lb consul-dpl:1 3/3 -``` - -!!! WARNING "Important!" - The successful completion of the **scale** command doesn't mean that we're ready to move on just yet! - We need to check in on the logs (**l0 service logs consul-svc**) until we can confirm that all three of the Consul servers have synced up with each other. 
- Each **consul-server** section in the logs should be ending with **consul: Adding LAN server [ip address]** or **agent: Join completed**. - If you see one of the sections ending with **agent: Join failed, retrying in 30s**, you need to wait for that server to join the cluster before continuing. - - ---- - -### Part 4: Update and Redeploy the Redis and Guestbook Applications - -We're going to need the URL of the Consul load balancer again. -In each of the CLI.Redis and CLI.Guestbook task definition files, look for the `CONSUL_SERVER_URL` block in the `consul-agent` container and populate the value field with the Consul load balancer's URL, then save the file. -At the command prompt, execute the two following commands to create new versions of the deploys for the Redis and Guestbook applications: - -`l0 deploy create CLI.Redis.Dockerrun.aws.json redis-dpl` - -`l0 deploy create CLI.Guestbook.Dockerrun.aws.json guestbook-dpl` - -Then, execute the two following commands to redeploy the existing Redis and Guestbook services using those new deploys: - -`l0 service update --wait redis-svc redis-dpl:latest` - -`l0 service update --wait guestbook-svc guestbook-dpl:latest` - -!!! NOTE - Here, we should run `l0 service logs consul-svc` again and confirm that the Consul cluster has discovered these two services. - -We can use `l0 loadbalancer get guestbook-lb` to obtain the guestbook application's URL, and then navigate to it with a web browser. -Our guestbook app should be up and running - this time, it's been deployed without needing to know the address of the Redis backend! - -Of course, this is a simple example; in both this deployment and [Deployment 2](#2a-deploy-with-layer0-cli), we needed to use `l0 loadbalancer get` to obtain the URL of a load balancer. -However, in a system with many services that uses Consul like this example, we only ever need to find the URL of the Consul cluster - not the URLs of every service that needs to talk to another of our services. - - ---- - -### Part 5: Inspect the Consul Universe (Optional) - -Let's take a glimpse into how this system that we've deployed works. -**This requires that we have access to the key pair we've told Layer0 about when we [set it up](/setup/install/#part-2-create-an-access-key).** - - -#### Open Ports for SSH - -We want to SSH into the Guestbook EC2 instance, which means that we need to tell the Guestbook load balancer to allow SSH traffic through. -At the command prompt, execute the following: - -`l0 loadbalancer addport guestbook-lb 22:22/tcp` - -We should see output like the following: - -``` -LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL -guestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true - 22:22/TCP -``` - -We need to take note of the load balancer's URL here, too. - - -#### SSH Into the Instance - -At the command prompt, execute the following: - -`ssh -i /path/to/keypair ec2-user@ -o ServerAliveInterval=30` - -(We'll probably be asked if we want to continue connecting - we do, so we'll enter `yes`.) 
- -Summary of arguments passed into the above command: - -- `-i /path/to/keypair`: this allows us to specify an identity file for use when connecting to the remote machine - in this case, we want to replace `/path/to/keypair` with the actual path to the keypair we created when we set up Layer0 -- `ec2-user@`: the address (here we want to replace `` with the actual URL of the guestbook load balancer) of the machine to which we want to connect and the name of the user (`ec2-user`) that we'd like to connect as -- `-o`: allows us to set parameters on the `ssh` command -- `ServerAliveInterval=30`: one of those `ssh` parameters - AWS imposes an automatic disconnect if a connection is not active for a certain amount of time, so we use this option to ping every 30 seconds to prevent that automatic disconnect - - -#### Look Around You - -We're now inside of the EC2 instance! -If we run `docker ps`, we should see that our three Docker containers (the Guestbook app, a Consul agent, and Registrator) are up and running, as well as an `amazon-ecs-agent` image. -But that's not the Consul universe that we came here to see. -At the EC2 instance's command prompt, execute the following: - -`echo $(curl -s localhost:8500/v1/catalog/services) | jq '.'` - -We should see output like the following: - -``` -{ - "consul": [], - "consul-8301": [ - "udp" - ], - "consul-8500": [], - "consul-8600": [ - "udp" - ], - "guestbook-redis": [], - "redis": [] -} -``` - -Summary of commands passed in the above command: - -- `curl -s localhost:8500/v1/catalog/services`: use `curl` to send a GET request to the specified URL, where `localhost:8500` is an HTTP connection to the local Consul agent in this EC2 instance (the `-s` flag just silences excess output from `curl`) -- `| jq '.'`: use a pipe (`|`) to take whatever returns from the left side of the pipe and pass it to the `jq` program, which we use here simply to pretty-print the JSON response -- `echo $(...)`: print out whatever returns from running the stuff inside of the parens; not necessary, but it gives us a nice newline after we get our response - -In that output, we can see all of the things that our local Consul agent knows about. -In addition to a few connections to the Consul server cluster, we can see that it knows about the Guestbook application running in this EC2 instance, as well as the Redis application running in a different instance with its own Consul agent and Registrator. - -Let's take a closer look at the Redis service and see how our Guestbook application is locating our Redis application. -At the EC2 instance's command prompt, execute the following: - -`echo $(curl -s http://localhost:8500/v1/catalog/service/redis) | jq '.'` - -We should see output like the following: - -``` -[ - { - "ID": "b4bb81e6-fe6a-c630-2553-7f6492ae5275", - "Node": "ip-10-100-230-97.us-west-2.compute.internal", - "Address": "10.100.230.97", - "Datacenter": "dc1", - "TaggedAddresses": { - "lan": "10.100.230.97", - "wan": "10.100.230.97" - }, - "NodeMeta": {}, - "ServiceID": "562aceee6935:ecs-l0-tlakedev-redis-dpl-20-redis-e0f989e5af97cdfd0e00:6379", - "ServiceName": "redis", - "ServiceTags": [], - "ServiceAddress": "10.100.230.97", - "ServicePort": 6379, - "ServiceEnableTagOverride": false, - "CreateIndex": 761, - "ModifyIndex": 761 - } -] - -``` - -To _really_ see how the Guestbook application connects to Redis, we can take an _even closer_ look! 
- -Run `docker ps` to generate a listing of all the containers that Docker is running on the EC2 instance, and note the Container ID for the Guestbook container. Then run the following command to connect to the Guestbook container: - -`docker exec -it [container_id] /bin/sh` - -Once we've gotten inside the container, we'll run a similar command to the previous `curl`: - -`curl -s consul-agent:8500/v1/catalog/service/redis` - -Our Guestbook application makes a call like this one and figures out how to connect to the Redis service by mushing together the information from the `ServiceAddress` and `ServicePort` fields! - -To close the `ssh` connection to the EC2 instance, run `exit` in the command prompt. - - ---- - -### Cleanup - -When you're finished with the example, we can instruct Layer0 to terminate the applications and delete the environment. - -`l0 environment delete demo-env` - - ---- - -## Deploy with Terraform - -As before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI. -As before, we will assume that you've cloned the [guides](https://github.com/quintilesims/guides) repo and are working in the `iterative-walkthrough/deployment-3/` directory. - -We'll use these files to manage our deployment with Terraform: - -| Filename | Purpose | -|----------|---------| -| `Guestbook.Dockerrun.aws.json` | Template for running the Guestbook application | -| `main.tf` | Provisions resources; populates variables in template files | -| `outputs.tf` | Values that Terraform will yield during deployment | -| `Redis.Dockerrun.aws.json` | Template for running the Redis application | -| `terraform.tfstate` | Tracks status of deployment _(created and managed by Terraform)_ | -| `terraform.tfvars` | Variables specific to the environment and application(s) | -| `variables.tf` | Values that Terraform will use during deployment | - ---- - -### `*.tf`: A Brief Aside: Revisited: Redux - -In looking at `main.tf`, you can see that we're pulling in a Consul module that we maintain (here's the [repo](https://github.com/quintilesims/consul)); this removes the need for a local task definition file. - -We also are continuing to use modules for Redis and Guestbook. -However, instead of just sourcing the module and passing in a value or two, you can see that we actually create new deploys from local task definition files and pass those deploys in to the module. -This design allows us to use pre-made modules while also offering a great deal of flexibility. -If you'd like to follow along the Redis deployment logic chain (the other applications/services work similarly), it goes something like this: - -- `main.tf` creates a deploy for the Redis server by rendering a local task definition and populating it with certain values -- `main.tf` passes the ID of the deploy into the Redis module, along with other values the module requires -- [the Redis module](https://github.com/quintilesims/redis/tree/master/terraform) pulls all the variables it knows about (both the defaults in `variables.tf` as well as the ones passed in) -- among other Layer0/AWS resources, the module spins up a Redis service; since a deploy ID has been provided, it uses that deploy to create the service instead of a deploy made from a [default task definition](https://github.com/quintilesims/redis/tree/master/terraform/Dockerrun.aws.json) contained within the module - - ---- - -### Part 1: Terraform Get - -Run `terraform get` to pull down all the source materials Terraform needs for our deployment. 
- - ---- - -### Part 2: Terraform Plan - -As before, we can run `terraform plan` to see what's going to happen. -We should see that there are 12 new resources to be created: - -- the environment -- the two local deploys which will be used for Guestbook and Redis -- the load balancer, deploy, and service from each of the Consul, Guestbook, and Redis modules - - _note that even though the default modules' deploys are created, they won't actually be used to deploy services_ - - ---- - -### Part 3: Terraform Apply - -Run `terraform apply`, and we should see output similar to the following: - -``` -data.template_file.consul: Refreshing state... -layer0_deploy.consul-dpl: Creating... - -... -... -... - -layer0_service.guestbook-svc: Creation complete - -Apply complete! Resources: 10 added, 0 changed, 0 destroyed. - -The state of your infrastructure has been saved to the path -below. This state is required to modify and destroy your -infrastructure, so keep it safe. To inspect the complete state -use the `terraform show` command. - -State path: terraform.tfstate - -Outputs: - -guestbook_url = -``` - -!!! Note - It may take a few minutes for the guestbook service to launch and the load balancer to become available. - During that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL. - - -### What's Happening - -Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment. -Terraform also writes the state of your deployment to the `terraform.tfstate` file (creating a new one if it's not already there). - - -### Cleanup - -When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application. -Execute the following command (in the same directory): - -`terraform destroy` - -It's also now safe to remove the `.terraform/` directory and the `*.tfstate*` files. - - ---- - diff --git a/docs-src/docs/index.md b/docs-src/docs/index.md index 957e33ae4..f6633516e 100644 --- a/docs-src/docs/index.md +++ b/docs-src/docs/index.md @@ -1,6 +1,11 @@ -![Layer0 Logo](/static/logo_rs.png) +

+ Layer0 Logo +

+
# Build, Manage, and Deploy Your Application +
+ ## Meet Layer0 Layer0 is a framework that helps you deploy web applications to the cloud with minimal fuss. Using a simple command line interface (CLI), you can manage the entire life cycle of your application without having to focus on infrastructure. @@ -8,10 +13,11 @@ Layer0 is a framework that helps you deploy web applications to the cloud with m Ready to learn more about Layer0? See our [introduction page](intro.md) to learn about some important concepts. When you're ready to get started, take a look at the [installation page](setup/install.md) for information about setting up Layer0. ## Download -| Download **v0.10.3** | | -| - | - | - | +| Download **v0.10.4** | | | +|:-:|:-:|:-:| | ![Darwin](/static/icon_darwin.png) | ![Linux](/static/icon_linux.png) | ![Windows](/static/icon_windows.png) | -| [macOS](https://s3.amazonaws.com/xfra-layer0/release/v0.10.3/layer0_v0.10.3_darwin.zip) | [Linux](https://s3.amazonaws.com/xfra-layer0/release/v0.10.3/layer0_v0.10.3_linux.zip) | [Windows](https://s3.amazonaws.com/xfra-layer0/release/v0.10.3/layer0_v0.10.3_windows.zip) | +| [macOS](https://s3.amazonaws.com/xfra-layer0/release/v0.10.4/layer0_v0.10.4_darwin.zip) | [Linux](https://s3.amazonaws.com/xfra-layer0/release/v0.10.4/layer0_v0.10.4_linux.zip) | [Windows](https://s3.amazonaws.com/xfra-layer0/release/v0.10.4/layer0_v0.10.4_windows.zip) | + ## Contact Us diff --git a/docs-src/docs/intro.md b/docs-src/docs/intro.md index cdf8ab09a..9462df962 100644 --- a/docs-src/docs/intro.md +++ b/docs-src/docs/intro.md @@ -29,7 +29,7 @@ Powerful tools that give you the basic building blocks for high-availability, sc ### Services -Your running Layer0 applications. We also use the term `service` for tools such as Consul, for which we provide a pre-built [sample implementation](guides/consul) using Layer0. +Your running Layer0 applications. We also use the term `service` for tools such as Consul, for which we provide a pre-built [sample implementation](https://github.com/quintilesims/consul) using Layer0. ### Environments diff --git a/docs-src/docs/reference/cli.md b/docs-src/docs/reference/cli.md index 814174973..ad6ed4822 100644 --- a/docs-src/docs/reference/cli.md +++ b/docs-src/docs/reference/cli.md @@ -1,111 +1,81 @@ # Layer0 CLI Reference -##Global options - -The **l0** application is designed to be used with one of several subcommands: [admin](#admin), [deploy](#deploy), [environment](#environment), [job](#job), [loadbalancer](#loadbalancer), [service](#service), and [task](#task). These subcommands are detailed in the sections below. There are, however, some global parameters that you may specify when using **l0**. - -####Usage -
-
-
**l0** [_globalOptions_] _command_ _subcommand_ [_options_] [_parameters_]
-
-
- -####Optional arguments -
-
-
--output {text|json}
-
Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the **--output json** option, you can force **l0** to output JSON-formatted text.
-
-
-
--version
-
Display the version number of the **l0** application.
-
-
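To see how these global options combine with the subcommands documented in this reference, here is an illustrative session; the service and environment names are placeholders rather than commands you must run.

```
# Emit machine-readable JSON instead of the default text tables
l0 --output json environment list

# Allow a long-running deletion up to 30 minutes before the CLI gives up
l0 --timeout 30m service delete --wait old-svc
```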
+## Global options + +The `l0` application is designed to be used with one of several commands: [admin](#admin), [deploy](#deploy), [environment](#environment), [job](#job), [loadbalancer](#loadbalancer), [service](#service), and [task](#task). These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using `l0`. + +#### Usage +``` +l0 [global options] command subcommand [subcommand options] params +``` + +#### Global options +* `-o [text|json], --output [text|json]` - Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the `--output json` option, you can force `l0` to output JSON-formatted text. +* `-t value, --timeout value` - Specify the timeout for running `l0` commands. Values can be in h, m, s, or ms. +* `-d, --debug` - Print debug statements +* `-v, --version` - Display the version number of the `l0` application. --- -##Admin -The **admin** command is used to manage the Layer0 API server. This command is used with the following subcommands: [debug](#admin-debug), [sql](#admin-sql), and [version](#admin-version). +## Admin +The `admin` command is used to manage the Layer0 API server. This command is used with the following subcommands: [debug](#admin-debug), [sql](#admin-sql), and [version](#admin-version). ### admin debug -Use the **debug** subcommand to view the running version of your Layer0 API server and CLI. +Use the `debug` subcommand to view the running version of your Layer0 API server and CLI. #### Usage -
-
-
**l0 admin debug**
-
-
+``` +l0 admin debug +``` ### admin sql -Use the **sql** subcommand to initialize the Layer0 API database. +Use the `sql` subcommand to initialize the Layer0 API database. #### Usage -
-
-
**l0 admin sql**
-
-
- -####Additional information -
-
-
The **sql** subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.
-
-
+``` +l0 admin sql +``` + +#### Additional information +The `sql` subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so. ### admin version -Use the **version** subcommand to display the current version of the Layer0 API. +Use the `version` subcommand to display the current version of the Layer0 API. #### Usage -
-
-
**l0 admin version**
-
-
+```
+l0 admin version
+```

---

-##Deploy
+## Deploy
+Deploys are ECS Task Definitions. They are configuration files that detail how to deploy your application.
+The `deploy` command is used to manage Layer0 deploys. This command is used with the following subcommands: [create](#deploy-create), [delete](#deploy-delete), [get](#deploy-get), and [list](#deploy-list).

### deploy create
-Use the **create** subcommand to upload a Docker task definition into Layer0. This command is used with the following subcommands: [create](#deploy-create), [delete](#deploy-delete), [get](#deploy-get) and [list](#deploy-list).
+Use the `create` subcommand to upload a Docker task definition into Layer0.

#### Usage
-
-
-
**l0 deploy create** _dockerPath_ _deployName_
-
-
- -####Required parameters -
-
-
_dockerPath_
-
The path to the Docker task definition that you want to upload.
-
-
-
_deployName_
-
A name for the deploy.
-
-
- -####Additional information -
-
-
If _deployName_ exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version.
-

-
-
If you use Visual Studio to modify or create your Dockerrun file, you may see an "Invalid Dockerrun.aws.json" error. This error is caused by the default encoding used by Visual Studio. See the ["Common issues" page](http://localhost:8000/troubleshooting/commonissues/#invalid-dockerrunawsjson-error-when-creating-a-deploy) for steps to resolve this issue.
-

-
-
+``` +l0 deploy create taskDefPath deployName +``` + +#### Required parameters +* `taskDefPath` - The path to the Docker task definition that you want to upload. +* `deployName` - A name for the deploy. + +#### Additional information +If `deployName` exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version. + +If you use Visual Studio to modify or create your Dockerrun file, you may see an "Invalid Dockerrun.aws.json" error. This error is caused by the default encoding used by Visual Studio. See the ["Common issues" page](http://localhost:8000/troubleshooting/commonissues/#invalid-dockerrunawsjson-error-when-creating-a-deploy) for steps to resolve this issue. + Deploys created through Layer0 are rendered with a `logConfiguration` section for each container. If a `logConfiguration` section already exists, no changes are made to the section. The additional section enables logs from each container to be sent to the the Layer0 log group. This is where logs are looked up during `l0 logs` commands. The added `logConfiguration` section uses the following template: + ``` "logConfiguration": { "logDriver": "awslogs", @@ -117,123 +87,76 @@ The added `logConfiguration` section uses the following template: } } ``` -
-
-
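To make the `deploy create` workflow concrete, here is a small sketch that uploads a task definition and then revs it; the file name and deploy name are only illustrative.

```
# Upload a task definition as a new deploy named guestbook-dpl (version 1)
l0 deploy create ./Guestbook.Dockerrun.aws.json guestbook-dpl

# Re-running with the same deploy name uploads the file as version 2 of that deploy
l0 deploy create ./Guestbook.Dockerrun.aws.json guestbook-dpl
```

A service can then reference a specific version (for example `guestbook-dpl:2`) or simply `guestbook-dpl:latest`, as the walkthroughs elsewhere in these docs do.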
- ### deploy delete -Use the **delete** subcommand to delete a version of a Layer0 deploy. +Use the `delete` subcommand to delete a version of a Layer0 deploy. #### Usage -
-
-
**l0 deploy delete** _deployID_
-
-
- -####Required parameters -
-
-
_deployID_
-
The unique identifier of the version of the deploy that you want to delete. You can obtain a list of deployIDs for a given deploy by executing the following command: **l0 deploy get** _deployName_
-
-
+``` +l0 deploy delete deployName +``` + +#### Required parameters +* `deployName` - The name of the Layer0 deploy you want to delete. ### deploy get -Use the **get** subcommand to view information about an existing Layer0 deploy. +Use the `get` subcommand to view information about an existing Layer0 deploy. #### Usage -
-
-
**l0 deploy get** _deployName_
-
-
- -####Required parameters -
-
-
_deployName_
-
The name of the Layer0 deploy for which you want to view additional information.
-
-
- -####Additional information -
-
-
The **get** subcommand supports wildcard matching: `l0 deploy get dep*` would return all deploys beginning with `dep`.
-
-
+``` +l0 deploy get deployName +``` + +#### Required parameters +* `deployName` - The name of the Layer0 deploy for which you want to view additional information. + +#### Additional information +The `get` subcommand supports wildcard matching: `l0 deploy get dep*` would return all deploys beginning with `dep`. ### deploy list -Use the **list** subcommand to view a list of deploys in your instance of Layer0. +Use the `list` subcommand to view a list of deploys in your instance of Layer0. #### Usage -
-
-
**l0 deploy list**
-
-
+``` +l0 deploy list +``` --- ## Environment Layer0 environments allow you to isolate services and load balancers for specific applications. -The **environment** command is used to manage Layer0 environments. This command is used with the following subcommands: [create](#environment-create), [delete](#environment-delete), [get](#environment-get), [list](#environment-list), and [setmincount](#environment-setmincount). +The `environment` command is used to manage Layer0 environments. This command is used with the following subcommands: [create](#environment-create), [delete](#environment-delete), [get](#environment-get), [list](#environment-list), and [setmincount](#environment-setmincount). ### environment create -Use the **create** subcommand to create an additional Layer0 environment (_environmentName_). +Use the `create` subcommand to create a new Layer0 environment. #### Usage -
-
-
**l0 environment create** [--size] [--min-count] [--user-data] [--os] [--ami] _environmentName_
-
-
- -####Required parameters -
-
-
_environmentName_
-
A name for the environment.
-
-
- -####Optional arguments -
-
-
--size
-
The size of the EC2 instances to create in your environment (default: m3.medium).
-
-
-
--min-count
-
The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0).
-
-
-
--user-data
-
The user data template to use for the environment's autoscaling group.
-
-
-
--os
-
The operating system used in the environment. Options are "linux" or "windows" (default: linux). - More information on windows environments is documented below
-
-
-
--ami
-
A custom AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system.
-
-
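As a hedged illustration of the `environment create` options described in this section (the environment names, instance size, and template path are placeholders):

```
# Linux environment of t2.small instances, seeded with a custom user data template
l0 environment create --size t2.small --user-data ./userdata.tpl demo-env

# Windows environment using the default Layer0 AMI for that operating system
l0 environment create --os windows demo-win-env
```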
- -The user data template can be used to add custom configuration to your Layer0 environment. +``` +l0 environment create [--size size | --min-count mincount | + --user-data path | --os os | --ami amiID] environmentName +``` + +#### Required parameters +* `environmentName` - A name for the environment. + +#### Optional arguments +* `--size size` - The instance size of the EC2 instances to create in your environment (default: m3.medium). +* `--min-count mincount` - The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0). +* `--user-data path` - The user data template file to use for the environment's autoscaling group. +* `--os os` - The operating system used in the environment. Options are "linux" or "windows" (default: linux). More information on windows environments is documented below. +* `ami amiID` - A custom EC2 AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system. + +The user data template can be used to add custom configuration to your Layer0 environment. They are usually scripts that are executed at instance launch time to ensure an EC2 instance is in the correct state after the provisioning process finishes. Layer0 uses [Go Templates](https://golang.org/pkg/text/template) to render user data. Currently, two variables are passed into the template: **ECSEnvironmentID** and **S3Bucket**. -Please review the [ECS Tutorial](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html) -to better understand how to write a user data template, and use at your own risk! +!!! danger + Please review the [ECS Tutorial](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html) + to better understand how to write a user data template, and use at your own risk! **Linux Environments**: The default Layer0 user data template is: -``` +``` bash #!/bin/bash echo ECS_CLUSTER={{ .ECSEnvironmentID }} >> /etc/ecs/ecs.config echo ECS_ENGINE_AUTH_TYPE=dockercfg >> /etc/ecs/ecs.config @@ -246,7 +169,7 @@ start ecs ``` **Windows Environments**: The default Layer0 user data template is: -``` +``` powershell # Set agent env variables for the Machine context (durable) $clusterName = "{{ .ECSEnvironmentID }}" @@ -254,7 +177,7 @@ Write-Host Cluster name set as: $clusterName -foreground green [Environment]::SetEnvironmentVariable("ECS_CLUSTER", $clusterName, "Machine") [Environment]::SetEnvironmentVariable("ECS_ENABLE_TASK_IAM_ROLE", "false", "Machine") -$agentVersion = 'v1.14.0-1.windows.1' +$agentVersion = 'v1.5.2' $agentZipUri = "https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip" $agentZipMD5Uri = "$agentZipUri.md5" @@ -314,942 +237,472 @@ When creating Windows environments in Layer0, the root volume sizes for instance It can take as long as 45 minutes for a new windows container to come online. ### environment delete -Use the **delete** subcommand to delete an existing Layer0 environment. - -####Usage -
-
-
**l0 environment delete** [--wait] _environmentName_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the Layer0 environment that you want to delete.
-
-
- -####Optional arguments -
-
-
--wait
-
Wait until the deletion is complete before exiting.
-
-
- -####Additional information -
-
-
This operation performs several tasks asynchronously. When run without the _--wait_ option, this operation will most likely exit before all of these tasks are complete; when run with the _--wait_ option, this operation will only exit once these tasks have completed.
-
-
+Use the `delete` subcommand to delete an existing Layer0 environment. + +#### Usage +``` +l0 environment delete [--wait] environmentName +``` + +#### Required parameters +* `environmentName` - The name of the Layer0 environment that you want to delete. + +#### Optional arguments +* `--wait` - Wait until the deletion is complete before exiting. + +#### Additional information +This operation performs several tasks asynchronously. When run without the `--wait` option, this operation will most likely exit before all of these tasks are complete; when run with the `--wait` option, this operation will only exit once these tasks have completed. ### environment get -Use the **get** subcommand to display information about an existing Layer0 environment. +Use the `get` subcommand to display information about an existing Layer0 environment. #### Usage -
-
-
**l0 environment get** _environmentName_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the Layer0 environment for which you want to view additional information.
-
-
- -####Additional information -
-
-
The **get** subcommand supports wildcard matching: `l0 environment get test*` would return all environments beginning with `test`.
-
-
+``` +l0 environment get environmentName +``` + +#### Required parameters +* `environmentName` - The name of the Layer0 environment for which you want to view additional information. + +#### Additional information +The `get` subcommand supports wildcard matching: `l0 environment get test*` would return all environments beginning with `test`. ### environment list -Use the **list** subcommand to display a list of environments in your instance of Layer0. +Use the `list` subcommand to display a list of environments in your instance of Layer0. #### Usage -
-
-
**l0 environment list**
-
-
+``` +l0 environment list +``` ### environment setmincount -Use the **setmincount** subcommand to set the minimum number of EC2 instances allowed the environment's autoscaling group. - -####Usage -
-
-
**l0 enviroment setmincount** _environmentName_ _count_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the Layer0 environment that you want to delete.
-
-
-
_count_
-
The minimum number of instances allowed in the environment's autoscaling group.
-
-
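For example, to keep at least two instances running in a hypothetical `demo-env` environment:

```
# Ensure the autoscaling group for demo-env never scales below 2 instances
l0 environment setmincount demo-env 2
```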
+Use the `setmincount` subcommand to set the minimum number of EC2 instances allowed in the environment's autoscaling group.
+
+#### Usage
+```
+l0 environment setmincount environmentName count
+```
+
+#### Required parameters
+* `environmentName` - The name of the Layer0 environment that you want to adjust.
+* `count` - The minimum number of instances allowed in the environment's autoscaling group.

### environment link
-Use the **link** subcommand to link two environments together.
+Use the `link` subcommand to link two environments together.
When environments are linked, services inside the environments are allowed to communicate with each other as if they were in the same environment.
This link is bidirectional.
This command is idempotent; it will succeed even if the two specified environments are already linked.

-####Usage
-
-
-
**l0 environment link** _sourceEnvironmentName_ _destEnvironmentName_
-
-
- -####Required parameters -
-
-
_sourceEnvironmentName_
-
The name of the first environment to link.
-
-
-
_destEnvironmentName_
-
The name of the second environment to link.
-
-
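A minimal sketch of linking and later unlinking two hypothetical environments, `api-env` and `worker-env`:

```
# Allow services in api-env and worker-env to reach each other (the link is bidirectional)
l0 environment link api-env worker-env

# Remove the link when it is no longer needed; safe to repeat, since the command is idempotent
l0 environment unlink api-env worker-env
```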
+#### Usage +``` +l0 environment link sourceEnvironmentName destEnvironmentName +``` +#### Required parameters +* `sourceEnvironmentName` - The name of the source environment to link. +* `destEnvironmentName` - The name of the destination environment to link. ### environment unlink -Use the **unlink** subcommand to remove the link between two environments. +Use the `unlink` subcommand to remove the link between two environments. This command is idempotent; it will succeed even if the link does not exist. -####Usage -
-
-
**l0 environment unlink** _sourceEnvironmentName_ _destEnvironmentName_
-
-
- -####Required parameters -
-
-
_sourceEnvironmentName_
-
The name of the first environment to unlink.
-
-
-
_destEnvironmentName_
-
The name of the second environment to unlink.
-
-
+#### Usage +``` +l0 environment unlink sourceEnvironmentName destEnvironmentName +``` + +#### Required parameters +* `sourceEnvironmentName` - The name of the source environment to unlink. +* `destEnvironmentName` - The name of the destination environment to unlink. --- -##Job +## Job A Job is a long-running unit of work performed on behalf of the Layer0 API. -Jobs are executed as Layer0 tasks that run in the **api** Environment. -The **job** command is used with the following subcommands: [logs](#job-logs), [delete](#job-delete), [get](#job-get), and [list](#job-list). +Jobs are executed as Layer0 tasks that run in the **api** environment. +The `job` command is used with the following subcommands: [logs](#job-logs), [delete](#job-delete), [get](#job-get), and [list](#job-list). ### job logs -Use the **logs** subcommand to display the logs from a Layer0 job that is currently running. - -####Usage -
-
-
**l0 job logs** [--start *MM/DD HH:MM*] [--end *MM/DD HH:MM*] [--tail=*N* ] _jobName_
-
-
- -####Required parameters -
-
-
_jobName_
-
The name of the Layer0 job for which you want to view logs.
-
-
- -####Optional arguments -
-
-
--start *MM/DD HH:MM*
-
The start of the time range to fetch logs.
-
-
-
--end *MM/DD HH:MM*
-
The end of the time range to fetch logs.
-
-
-
--tail=*N*
-
Display only the last _N_ lines of the log.
-
-
- -###job delete -Use the **delete** subcommand to delete an existing job. - -####Usage -
-
-
**l0 job delete** *jobName*
-
-
- -####Required parameters -
-
-
*jobName*
-
The name of the job that you want to delete.
-
-
- -###job get -Use the **get** subcommand to display information about an existing Layer0 job. - -####Usage -
-
-
**l0 job get** *jobName*
-
-
- -####Required parameters -
-
-
_jobName_
-
The name of an existing Layer0 job.
-
-
- -####Additional information -
-
-
The **get** subcommand supports wildcard matching: `l0 job get 2a55*` would return all jobs beginning with `2a55`.
-
-
- -###job list -Use the **list** subcommand to display information about all of the existing jobs in an instance of Layer0. - -####Usage -
-
-
**l0 job list**
-
-
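As an illustration of the job commands documented here (the job name below is a placeholder; `l0 job list` shows real ones):

```
# List jobs, then look at one of them in detail
l0 job list
l0 job get 2a55d792

# Fetch only the last 50 log lines for that job, within a time window
l0 job logs --start 01/12 09:00 --end 01/12 10:00 --tail=50 2a55d792
```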
+Use the `logs` subcommand to display the logs from a Layer0 job that is currently running. + +#### Usage +``` +l0 job logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] jobName +``` + +#### Required parameters +* `jobName` - The name of the Layer0 job for which you want to view logs. + +#### Optional arguments +* `--start MM/DD HH:MM` - The start of the time range to fetch logs. +* `--end MM/DD HH:MM` - The end of the time range to fetch logs. +* `--tail=N` - Display only the last `N` lines of the log. + +### job delete +Use the `delete` subcommand to delete an existing job. + +#### Usage +``` +l0 job delete jobName +``` + +#### Required parameters +* `jobName` - The name of the job that you want to delete. + +### job get +Use the `get` subcommand to display information about an existing Layer0 job. + +#### Usage +``` +l0 job get jobName +``` + +#### Required parameters +* `jobName` - The name of an existing Layer0 job to display. + +#### Additional information +The `get` subcommand supports wildcard matching: `l0 job get 2a55*` would return all jobs beginning with `2a55`. + +### job list +Use the `list` subcommand to display information about all of the existing jobs in an instance of Layer0. + +#### Usage +``` +l0 job list +``` --- -##Loadbalancer -A load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 [services](#service). The **loadbalancer** command is used with the following subcommands: [create](#loadbalancer-create), [delete](#loadbalancer-delete), [addport](#loadbalancer-addport), [dropport](#loadbalancer-dropport), [get](#loadbalancer-get), [list](#loadbalancer-list), and [healthcheck](#loadbalancer-healthcheck). - -###loadbalancer create -Use the **create** subcommand to create a new load balancer. - -####Usage -
-
-
**l0 loadbalancer create** [--port _port_ --port _port_ ...] [--certificate _certificateName_] [--private] [healthcheck-flags]_environmentName loadBalancerName_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the existing Layer0 environment in which you want to create the load balancer.
-
-
-
_loadBalancerName_
-
A name for the load balancer.
-
-
- -####Optional arguments -
-
-
- --port _hostPort:containerPort/protocol_ -
-
-

The port configuration for the load balancer. _hostPort_ is the port on which the load balancer will listen for traffic; _containerPort_ is the port that traffic will be forwarded to. You can specify multiple ports using _--port xxx --port yyy_. If this option is not specified, Layer0 will use the following configuration: 80:80/tcp

-
-
-
-
- --certificate _certificateName_ -
-
-

The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.

-
-
-
-
- --private -
-
-

When you use this option, the load balancer will only be accessible from within the Layer0 environment.

-
-
-
-
- --healthcheck-target _target_ -
-
-

The target of the check. Valid pattern is `PROTOCOL:PORT/PATH` _(default: `"TCP:80"`)_ -
- If PROTOCOL is `HTTP` or `HTTPS`, both PORT and PATH are required -
- - _example: `HTTP:80/admin/healthcheck`_ -
- If PROTOCOL is `TCP` or `SSL`, PORT is required and PATH is not supported -
- - _example: `TCP:80`_

-
-
-
-
- --healthcheck-interval _interval_ -
-
-

The interval between checks _(default: `30`)_.

-
-
-
-
- --healthcheck-timeout _timeout_ -
-
-

The length of time before the check times out _(default: `5`)_.

-
-
-
-
- --healthcheck-healthy-threshold _healthyThreshold_ -
-
-

The number of checks before the instance is declared healthy _(default: `2`)_.

-
-
-
-
- --healthcheck-unhealthy-threshold _unhealthyThreshold_ -
-
-

The number of checks before the instance is declared unhealthy _(default: `2`)_.

-
-
-
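Pulling the port, certificate, and health check flags together, here is a hedged example of creating a public HTTPS load balancer; the environment, load balancer, and certificate names are illustrative.

```
# Terminate HTTPS on 443, forward to port 80 in the service, and health-check over HTTP
l0 loadbalancer create \
    --port 443:80/https --port 80:80/tcp \
    --certificate my-cert \
    --healthcheck-target HTTP:80/admin/healthcheck \
    demo-env api-lb
```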
-
-!!! Note "Ports and Health Checks"
-    When both the `--port` and the `--healthcheck-target` options are omitted, Layer0 configures the load balancer with some default values: `80:80/tcp` for ports and `tcp:80` for healthcheck target.
+## Load Balancer
+A load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 [services](#service). The `loadbalancer` command is used with the following subcommands: [create](#loadbalancer-create), [delete](#loadbalancer-delete), [addport](#loadbalancer-addport), [dropport](#loadbalancer-dropport), [get](#loadbalancer-get), [list](#loadbalancer-list), and [healthcheck](#loadbalancer-healthcheck).
+
+### loadbalancer create
+Use the `create` subcommand to create a new load balancer.
+
+#### Usage
+```
+l0 loadbalancer create [--port port ... | --certificate certificateName |
+    --private | --healthcheck-target target | --healthcheck-interval interval |
+    --healthcheck-timeout timeout | --healthcheck-healthy-threshold healthyThreshold |
+    --healthcheck-unhealthy-threshold unhealthyThreshold] environmentName loadBalancerName
+```
+
+#### Required parameters
+* `environmentName` - The name of the existing Layer0 environment in which you want to create the load balancer.
+* `loadBalancerName` - A name for the load balancer you are creating.
+
+#### Optional arguments
+* `--port port ...` - The port configuration for the listener of the load balancer. Valid pattern is `hostPort:containerPort/protocol`. Multiple ports can be specified using `--port port1 --port port2 ...` (default: `80:80/TCP`).
+    * `hostPort` - The port that the load balancer will listen for traffic on.
+    * `containerPort` - The port that the load balancer will forward traffic to.
+    * `protocol` - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).
+* `--certificate certificateName` - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.
+* `--private` - When you use this option, the load balancer will only be accessible from within the Layer0 environment.
+* `--healthcheck-target target` - The target of the check. Valid pattern is `PROTOCOL:PORT/PATH` (default: `"TCP:80"`).
+    * If `PROTOCOL` is `HTTP` or `HTTPS`, both `PORT` and `PATH` are required. Example: `HTTP:80/admin/healthcheck`.
+    * If `PROTOCOL` is `TCP` or `SSL`, `PORT` is required and `PATH` is not used. Example: `TCP:80`
+* `--healthcheck-interval interval` - The interval between checks (default: `30`).
+* `--healthcheck-timeout timeout` - The length of time before the check times out (default: `5`).
+* `--healthcheck-healthy-threshold healthyThreshold` - The number of checks before the instance is declared healthy (default: `2`).
+* `--healthcheck-unhealthy-threshold unhealthyThreshold` - The number of checks before the instance is declared unhealthy (default: `2`).
+
+!!! info "Ports and Health Checks"
+    When both the `--port` and the `--healthcheck-target` options are omitted, Layer0 configures the load balancer with some default values: `80:80/TCP` for ports and `TCP:80` for healthcheck target.
    These default values together create a load balancer configured with a simple but functioning health check, opening up a set of ports that allows traffic to the target of the healthcheck.
- (`--healthcheck-target tcp:80` tells the load balancer to ping its services at port 80 to determine their status, and `--port 80:80/tcp` configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services) + (`--healthcheck-target TCP:80` tells the load balancer to ping its services at port 80 to determine their status, and `--port 80:80/TCP` configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services) When creating a load balancer with non-default configurations for either `--port` or `--healthcheck-target`, make sure that a valid `--port` and `--healthcheck-target` pairing is also created. -###loadbalancer delete -Use the **delete** subcommand to delete an existing load balancer. - -####Usage -
-
-
**l0 loadbalancer delete** [--wait] *loadBalancerName*
-
-
- -####Required parameters -
-
-
*loadBalancerName*
-
The name of the load balancer that you want to delete.
-
-
- -####Optional arguments -
-
-
--wait
-
Wait until the deletion is complete before exiting.
-
-
- -####Additional information -
-
-
In order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer.
-

-
-
This operation performs several tasks asynchronously. When run without the _--wait_ option, this operation will most likely exit before all of these tasks are complete; when run with the _--wait_ option, this operation will only exit once these tasks have completed.
-
-
- -###loadbalancer addport -Use the **addport** subcommand to add a new port configuration to an existing Layer0 load balancer. - -####Usage -
-
-
**l0 loadbalancer addport** *loadBalancerName hostPort:containerPort/protocol* [--certificate _certificateName_]
-
-
- -####Required parameters -
-
-
_loadBalancerName_
-
The name of an existing Layer0 load balancer in which you want to add the port configuration.
-
-
-
_hostPort_
-
The port that the load balancer will listen on.
-
-
-
_containerPort_
-
The port that the load balancer will forward traffic to.
-
-
-
_protocol_
-
The protocol to use when forwarding traffic (acceptable values: tcp, ssl, http, and https).
-
-
- -####Optional arguments -
-
-
--certificate _certificateName_
-
The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.
-
-
- -####Additional information -
-
-
The port configuration you specify must not already be in use by the load balancer you specify.
-
-
- -###loadbalancer dropport -Use the **dropport** subcommand to remove a port configuration from an existing Layer0 load balancer. - -####Usage -
-
-
**l0 loadbalancer dropport** *loadBalancerName* *hostPort*
-
-
- -####Required parameters -
-
-
_loadBalancerName_
-
The name of an existing Layer0 load balancer in which you want to remove the port configuration.
-
-
-
_hostPort_
-
The host port to remove from the load balancer.
-
-
- -###loadbalancer get -Use the **get** subcommand to display information about an existing Layer0 load balancer. - -####Usage -
-
-
**l0 loadbalancer get** *environmentName:loadBalancerName*
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of an existing Layer0 environment.
-
-
-
_loadBalancerName_
-
The name of an existing Layer0 load balancer.
-
-
- -####Additional information -
-
-
The **get** subcommand supports wildcard matching: `l0 loadbalancer get entrypoint*` would return all jobs beginning with `entrypoint`.
-
-
- -###loadbalancer list -Use the **list** subcommand to display information about all of the existing load balancers in an instance of Layer0. - -####Usage -
-
-
**l0 loadbalancer list**
-
-
- -###loadbalancer healthcheck -Use the **healthcheck** subcommand to display information about or update the configuration of a load balancer's health check. - -####Usage -
-
-
**l0 loadbalancer healthcheck** [healthcheck-flags] *loadbalancerName*
-
-
- -####Optional arguments -
-
-
- --set-target _target_ -
-
-

The target of the check. Valid pattern is `PROTOCOL:PORT/PATH`, where PROTOCOL values are: -
- `HTTP` or `HTTPS`: both PORT and PATH are required -
- - _example: `HTTP:80/admin/healthcheck`_ -
- `TCP` or `SSL`: PORT is required, PATH is not supported -
- - _example: `TCP:80`_

-
-
-
-
- --set-interval _interval_ -
-
-

The interval between checks.

-
-
-
-
- --set-timeout _timeout_ -
-
-

The length of time before the check times out.

-
-
-
-
- --set-healthy-threshold _healthyThreshold_ -
-
-

The number of checks before the instance is declared healthy.

-
-
-
-
- --set-unhealthy-threshold _unhealthyThreshold_ -
-
-

The number of checks before the instance is declared unhealthy.

-
-
-
- -####Additional information - -
-
-
Calling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.
-
-
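To ground the remaining load balancer subcommands in this reference, here is an illustrative sequence against a hypothetical load balancer named `api-lb` in `demo-env`:

```
# Open an additional HTTPS listener, inspect the result, then retarget the health check
l0 loadbalancer addport --certificate my-cert api-lb 443:80/https
l0 loadbalancer get demo-env:api-lb
l0 loadbalancer healthcheck --set-target HTTP:80/admin/healthcheck api-lb

# Drop the listener again by its host port
l0 loadbalancer dropport api-lb 443
```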
+### loadbalancer delete +Use the `delete` subcommand to delete an existing load balancer. + +#### Usage +``` +l0 loadbalancer delete [--wait] loadBalancerName +``` + +#### Required parameters +* `loadBalancerName` - The name of the load balancer that you want to delete. + +#### Optional arguments +* `--wait` - Wait until the deletion is complete before exiting. + +#### Additional information +In order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer. + +This operation performs several tasks asynchronously. When run without the `--wait` option, this operation will most likely exit before all of these tasks are complete; when run with the `--wait` option, this operation will only exit once these tasks have completed +. +### loadbalancer addport +Use the `addport` subcommand to add a new port configuration to an existing Layer0 load balancer. + +#### Usage +``` +l0 loadbalancer addport [--certificate certificateName] loadBalancerName port +``` + +#### Required parameters +* `loadBalancerName` - The name of an existing Layer0 load balancer in which you want to add the port configuration. +* `port` - The port configuration for the listener of the load balancer. Valid pattern is `hostPort:containerPort/protocol`. + * `hostPort` - The port that the load balancer will listen for traffic on. + * `containerPort` - The port that the load balancer will forward traffic to. + * `protocol` - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS). + +#### Optional arguments +* `--certificate certificateName` - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration. + +#### Additional information +The port configuration you specify must not already be in use by the load balancer you specify. + +### loadbalancer dropport +Use the `dropport` subcommand to remove a port configuration from an existing Layer0 load balancer. + +#### Usage +``` +l0 loadbalancer dropport loadBalancerName hostPort +``` + +#### Required parameters +* `loadBalancerName`- The name of an existing Layer0 load balancer from which you want to remove the port configuration. +* `hostPort`- The host port to remove from the load balancer. + +### loadbalancer get +Use the `get` subcommand to display information about an existing Layer0 load balancer. + +#### Usage +``` +l0 loadbalancer get [environmentName:]loadBalancerName +``` + +#### Required parameters +* `[environmentName:]loadBalancerName` - The name of an existing Layer0 load balancer. You can optionally provide the Layer0 environment (`environmentName`) associated with the Load Balancer + +#### Additional information +The `get` subcommand supports wildcard matching: `l0 loadbalancer get entrypoint*` would return all jobs beginning with `entrypoint`. + +### loadbalancer list +Use the `list` subcommand to display information about all of the existing load balancers in an instance of Layer0. + +#### Usage +``` +l0 loadbalancer list +``` + +### loadbalancer healthcheck +Use the `healthcheck` subcommand to display information about or update the configuration of a load balancer's health check. 
+ +#### Usage +``` +l0 loadbalancer healthcheck [--set-target target | --set-interval interval | + --set-timeout timeout | --set-healthy-threshold healthyThreshold | + --set-unhealthy-threshold unhealthyThreshold] loadbalancerName +``` + +#### Required parameters +* `loadBalancerName` - The name of the existing Layer0 load balancer you are modifying. + +#### Optional arguments +* `--set-target target` - The target of the check. Valid pattern is `PROTOCOL:PORT/PATH`. + * If `PROTOCOL` is `HTTP` or `HTTPS`, both `PORT` and `PATH` are required. Example: `HTTP:80/admin/healthcheck`. + * If `PROTOCOL` is `TCP` or `SSL`, `PORT` is required and `PATH` is not used. Example: `TCP:80` +* `--set-interval interval` - The interval between health checks. +* `--set-timeout timeout` - The length of time in seconds before the health check times out. +* `--set-healthy-threshold healthyThreshold` - The number of checks before the instance is declared healthy. +* `--set-unhealthy-threshold unhealthyThreshold` - The number of checks before the instance is declared unhealthy. + +#### Additional information +Calling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged. --- ## Service A service is a component of a Layer0 environment. The purpose of a service is to execute a Docker image specified in a [deploy](#deploy). In order to create a service, you must first create an [environment](#environment) and a [deploy](#deploy); in most cases, you should also create a [load balancer](#loadbalancer) before creating the service. -The **service** command is used with the following subcommands: [create](#service-create), [delete](#service-delete), [get](#service-get), [update](#service-update), [list](#service-list), [logs](#service-logs), and [scale](#service-scale). - -###service create -Use the **create** subcommand to create a Layer0 service. - -####Usage -
-
-
**l0 service create** [--loadbalancer _environmentName:loadBalancerName_ ] [--no-logs] _environmentName serviceName deployName:deployVersion_
-
-
- -####Required parameters -
-
-
_serviceName_
-
A name for the service that you are creating.
-
-
-
_environmentName_
-
The name of an existing Layer0 environment.
-
-
-
_deployName_
-
The name of a Layer0 deploy that exists in the environment _environmentName_.
-
-
-
_deployVersion_
-
The version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.
-
-
- -####Optional arguments -
-
-
--loadbalancer _environmentName:loadBalancerName_
-
Place the new service behind an existing load balancer named _loadBalancerName_ in the environment named _environmentName_.
-
-
-
--no-logs
-
Disable cloudwatch logging for the service
-
-
+The `service` command is used with the following subcommands: [create](#service-create), [delete](#service-delete), [get](#service-get), [update](#service-update), [list](#service-list), [logs](#service-logs), and [scale](#service-scale). + +### service create +Use the `create` subcommand to create a Layer0 service. + +#### Usage +``` +l0 service create [--loadbalancer [environmentName:]loadBalancerName | + --no-logs] environmentName serviceName deployName[:deployVersion] +``` + +#### Required parameters +* `serviceName` - A name for the service that you are creating. +* `environmentName` - The name of an existing Layer0 environment. +* `deployName[:deployVersion]` - The name of a Layer0 deploy that exists in the environment `environmentName`. You can optionally specify the version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used. + +#### Optional arguments +* `--loadbalancer [environmentName:]loadBalancerName` - Place the new service behind an existing load balancer `loadBalancerName`. You can optionally specify the Layer0 environment (`environmentName`) where the load balancer exists. +* `--no-logs` - Disable cloudwatch logging for the service ### service update -Use the **update** subcommand to apply an existing Layer0 Deploy to an existing Layer0 service. +Use the `update` subcommand to apply an existing Layer0 Deploy to an existing Layer0 service. + +#### Usage +``` +l0 service update [--no-logs] [environmentName:]serviceName deployName[:deployVersion] +``` + +#### Required parameters +* `[environmentName:]serviceName` - The name of an existing Layer0 service into which you want to apply the deploy. You can optionally specify the Layer0 environment (`environmentName`) of the service. +* `deployName[:deployVersion]` - The name of the Layer0 deploy that you want to apply to the service. You can optionally specify a specific version of the deploy (`deployVersion`). If you do not specify a version number, the latest version of the deploy will be applied. + +#### Optional arguments +* `--no-logs` - Disable cloudwatch logging for the service + +#### Additional information +If your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead. + +### service delete +Use the `delete` subcommand to delete an existing Layer0 service. #### Usage -
-
-
**l0 service update** [--no-logs] _environmentName:serviceName deployName:deployVersion_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the Layer0 environment in which the service resides.
-
-
-
_serviceName_
-
The name of an existing Layer0 service into which you want to apply the deploy.
-
-
-
_deployName_
-
The name of the Layer0 deploy that you want to apply to the service.
-
-
-
_deployVersion_
-
The version of the Layer0 deploy that you want to apply to the service. If you do not specify a version number, the latest version of the deploy will be applied.
-
-
-
--no-logs
-
Disable cloudwatch logging for the service
-
-
- -####Additional information - -
-
-
If your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead.
-
-
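For example, to roll a service forward to a newer version of its deploy (the names below are hypothetical):

```
l0 service update demo-env:guestbook-svc guestbook-dpl:2
```

Omitting `:2` would apply the latest version of `guestbook-dpl` instead.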
- - -###service delete -Use the **delete** subcommand to delete an existing Layer0 service. +``` +l0 service delete [--wait] [environmentName:]serviceName +``` + +#### Required parameters +* `[environmentName:]serviceName` - The name of the Layer0 service that you want to delete. You can optionally provide the Layer0 environment (`environmentName`) of the service. + +#### Optional arguments +* `--wait` - Wait until the deletion is complete before exiting. + +#### Additional information +This operation performs several tasks asynchronously. When run without the `--wait` option, this operation will most likely exit before all of these tasks are complete; when run with the `--wait` option, this operation will only exit once these tasks have completed. + +### service get +Use the `get` subcommand to display information about an existing Layer0 service. + +#### Usage +``` +l0 service get [environmentName:]serviceName +``` + +#### Required parameters +* `[environmentName:]serviceName` - The name of an existing Layer0 service. You can optionally provide the Layer0 environment (`environmentName`) of the service. + +### service list +Use the `list` subcommand to list all of the existing services in your Layer0 instance. #### Usage -
-
-
**l0 service delete** [--wait] _environmentName:serviceName_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the Layer0 environment that contains the service you want to delete.
-
-
-
_serviceName_
-
The name of the Layer0 service that you want to delete.
-
-
- -####Optional arguments -
-
-
--wait
-
Wait until the deletion is complete before exiting.
-
-
- -####Additional information -
-
-
This operation performs several tasks asynchronously. When run without the _--wait_ option, this operation will most likely exit before all of these tasks are complete; when run with the _--wait_ option, this operation will only exit once these tasks have completed.
-
-
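For example, to delete a service and wait for the deletion to finish (the names below are hypothetical):

```
l0 service delete --wait demo-env:guestbook-svc
```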
- -###service get -Use the **get** subcommand to display information about an existing Layer0 service. - -####Usage -
-
-
**l0 service get** _environmentName:serviceName_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of an existing Layer0 environment.
-
-
-
_serviceName_
-
The name of an existing Layer0 service.
-
-
- -###service list -Use the **list** subcommand to list all of the existing services in your Layer0 instance. - -####Usage -
-
-
**l0 service list**
-
-
+```
+l0 service list
+``` ### service logs -Use the **logs** subcommand to display the logs from a Layer0 service that is currently running. - -####Usage -
-
-
**l0 service logs** [--start *MM/DD HH:MM*] [--end *MM/DD HH:MM*] [--tail=*N* ] _serviceName_
-
-
- -####Required parameters -
-
-
_serviceName_
-
The name of the Layer0 service for which you want to view logs.
-
-
- -####Optional arguments -
-
-
--start *MM/DD HH:MM*
-
The start of the time range to fetch logs.
-
-
-
--end *MM/DD HH:MM*
-
The end of the time range to fetch logs.
-
-
-
--tail=*N*
-
Display only the last _N_ lines of the log.
-
-
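For example, to fetch only the most recent log lines for a service (the service name is hypothetical):

```
l0 service logs --tail=100 guestbook-svc
```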
+Use the `logs` subcommand to display the logs from a Layer0 service that is currently running. + +#### Usage +``` +l0 service logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] serviceName +``` + +#### Required parameters +* `serviceName` - The name of the Layer0 service for which you want to view logs. + + +#### Optional arguments +* `--start MM/DD HH:MM` - The start of the time range to fetch logs. +* `--end MM/DD HH:MM` - The end of the time range to fetch logs. +* `--tail=N` - Display only the last `N` lines of the log. ### service scale -Use the **scale** subcommand to specify how many copies of an existing Layer0 service should run. - -####Usage -
-
-
**l0 service scale** _environmentName:serviceName N_
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the Layer0 environment that contains the service that you want to scale.
-
-
-
_serviceName_
-
The name of the Layer0 service that you want to scale up.
-
-
-
_N_
-
The number of copies of the specified service that should be run.
-
-
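For example, to run three copies of a service (the names below are hypothetical):

```
l0 service scale demo-env:guestbook-svc 3
```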
+Use the `scale` subcommand to specify how many copies of an existing Layer0 service should run. + +#### Usage +``` +l0 service scale [environmentName:]serviceName copies +``` + +#### Required parameters +* `[environmentName:]serviceName` - The name of the Layer0 service that you want to scale up. You can optionally provide the Layer0 environment (`environmentName`) of the service. +* `copies` - The number of copies of the specified service that should be run. --- ## Task A Layer0 task is a component of an environment. A task executes the contents of a Docker image, as specified in a deploy. A task differs from a service in that a task does not restart after exiting. Additionally, ports are not exposed when using a task. -The **task** command is used with the following subcommands: [create](#task-create), [delete](#task-delete), [get](#task-get), [list](#task-list), and [logs](#task-logs). +The `task` command is used with the following subcommands: [create](#task-create), [delete](#task-delete), [get](#task-get), [list](#task-list), and [logs](#task-logs). ### task create -Use the **create** subcommand to create a Layer0 task. +Use the `create` subcommand to create a Layer0 task. #### Usage -
-
-
**l0 task create** [--no-logs] [--copies _copies_] *environmentName taskName deployName*
-
-
- -####Required parameters -
-
-
_environmentName_
-
The name of the existing Layer0 environment in which you want to create the task.
-
-
-
_taskName_
-
A name for the task.
-
-
-
_deployName_
-
The name of an existing Layer0 deploy that the task should use.
-
-
- -####Optional arguments -
-
-
--copies
-
The number of copies of the task to run (default: 1)
-
-
-
--no-logs
-
Disable cloudwatch logging for the service
-
-
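For example, to run two copies of a one-off task (the environment, task, and deploy names are hypothetical):

```
l0 task create --copies 2 demo-env migrate-tsk migrate-dpl
```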
+```
+l0 task create [--copies copies | --no-logs] environmentName taskName deployName
+```
+
+#### Required parameters
+* `environmentName` - The name of the existing Layer0 environment in which you want to create the task.
+* `taskName` - A name for the task.
+* `deployName` - The name of an existing Layer0 deploy that the task should use.
+
+#### Optional arguments
+* `--copies copies` - The number of copies of the task to run (default: 1).
+* `--no-logs` - Disable cloudwatch logging for the task. ### task delete -Use the **delete** subcommand to delete an existing Layer0 task. +Use the `delete` subcommand to delete an existing Layer0 task. #### Usage -
-
-
**l0 task delete** [*environmentName*:]*taskName*
-
-
- -####Required parameters -
-
-
_taskName_
-
The name of the Layer0 task that you want to delete.
-
-
- -####Optional parameters -
-
-
[_environmentName_:]
-
The name of the Layer0 environment that contains the task. This parameter is only necessary if multiple environments contain tasks with exactly the same name.
-
-
+```
+l0 task delete [environmentName:]taskName
+```
+
+#### Required parameters
+* `[environmentName:]taskName` - The name of the Layer0 task that you want to delete. You can optionally specify the name of the Layer0 environment that contains the task. This parameter is only required if multiple environments contain tasks with exactly the same name. #### Additional information -
-
-
Until the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.
-
-
+Until the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour. ### task get -Use the **get** subcommand to display information about an existing Layer0 task (_taskName_). +Use the `get` subcommand to display information about an existing Layer0 task (`taskName`). #### Usage -
-
-
**l0 task get** [*environmentName*:]*taskName*
-
-
- -####Required parameters -
-
-
_taskName_
-
The name of a Layer0 task for which you want to see information.
-
-
- -####Additional information -
-
-
The value of _taskName_ does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in _taskName_, then information about all matching tasks will be returned.
-
-
+```
+l0 task get [environmentName:]taskName
+```
+
+#### Required parameters
+* `[environmentName:]taskName` - The name of a Layer0 task for which you want to see information. You can optionally specify the name of the Layer0 environment that contains the task.
+
+#### Additional information
+The value of `taskName` does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in `taskName`, then information about all matching tasks will be returned. ### task list -Use the **task** subcommand to display a list of running tasks in your Layer0. +Use the `list` subcommand to display a list of running tasks in your Layer0. #### Usage -
-
-
**l0 task list**
-
-
+``` +l0 task list +``` ### task logs -Use the **logs** subcommand to display logs for a running Layer0 task. +Use the `logs` subcommand to display logs for a running Layer0 task. #### Usage -
-
-
**l0 task logs** [--start *MM/DD HH:MM*] [--end *MM/DD HH:MM*] [--tail=*N* ] _taskName_
-
-
- -####Required parameters -
-
-
_taskName_
-
The name of an existing Layer0 task.
-
-
- -####Optional arguments -
-
-
--start *MM/DD HH:MM*
-
The start of the time range to fetch logs.
-
-
-
--end *MM/DD HH:MM*
-
The end of the time range to fetch logs.
-
-
-
--tail=*N*
-
Display only the last _N_ lines of the log.
-
-
- -####Additional information -
-
-
The value of _taskName_ does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in _taskName_, then information about all matching tasks will be returned.
-
-
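For example, to view only the most recent log lines for a task (the task name is hypothetical):

```
l0 task logs --tail=50 migrate-tsk
```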
+``` +l0 task logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] taskName +``` + +#### Required parameters +* `taskName` - The name of an existing Layer0 task. + +#### Optional arguments +* `--start MM/DD HH:MM` - The start of the time range to fetch logs. +* `--end MM/DD HH:MM` - The end of the time range to fetch logs. +* `--tail=N` - Display only the last `N` lines of the log. + +#### Additional information +The value of `taskName` does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in `taskName`, then information about all matching tasks will be returned. ### task list -Use the **task** subcommand to display a list of running tasks in your Layer0. +Use the `list` subcommand to display a list of running tasks in your Layer0. #### Usage -
-
-
**l0 task list**
-
-
-
======= +``` +l0 task list +```
diff --git a/docs-src/docs/reference/consul.md b/docs-src/docs/reference/consul.md
index 26c29a546..9e8ad124e 100644
--- a/docs-src/docs/reference/consul.md
+++ b/docs-src/docs/reference/consul.md
@@ -24,8 +24,6 @@ Layer0 Services that use Consul will run Registrator alongside their application
Layer0 Services that use Consul will need to add the [Registrator](#registrator-container-definition) and [Consul Agent](#consul-agent-container-definition) definitions to the `containerDefinitions` section of your Deploys. You must also add the [Docker Socket](#docker-socket-volume-definition) definition to the `volumes` section of your Deploys.
-For an example of a Deploy that uses Consul, see the [Guestbook with Consul](/guides/guestbook_consul) guide. - --- ## Registrator Container Definition ```
diff --git a/docs-src/docs/reference/setup-cli.md b/docs-src/docs/reference/setup-cli.md
index 206bfd297..18d135a24 100644
--- a/docs-src/docs/reference/setup-cli.md
+++ b/docs-src/docs/reference/setup-cli.md
@@ -1,134 +1,120 @@ # Layer0 Setup Reference
-The Layer0 Setup application (commonly called **l0-setup**), is used to provision, update, and destroy Layer0 instances. +The Layer0 Setup application (commonly called `l0-setup`) is used for administrative tasks on Layer0 instances. ---- -## General Usage -You can use the `-h, --help` command to get generate information about the `l0-setup` tool: +## Global options + +`l0-setup` can be used with one of several commands: [init](#init), [plan](#plan), [apply](#apply), [list](#list), [push](#push), [pull](#pull), [endpoint](#endpoint), [destroy](#destroy), [upgrade](#upgrade), and [set](#set). These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using `l0-setup`. + +### Usage +``` +l0-setup [global options] command [command options] params +``` + +### Global options +* `-l value, --log value` - The log level to display on the console when you run commands. (default: info) +* `--version` - Display the version number of the `l0-setup` application. --- + ## Init -The **init** command is used to initialize or reconfigure a Layer0 instance. +The `init` command is used to initialize or reconfigure a Layer0 instance. This command will prompt the user for inputs required to create/update a Layer0 instance. Each of the inputs can be specified through an optional flag. ### Usage ``` -$ l0-setup init [options] +l0-setup init [--docker-path path | --module-source path | + --version version | --aws-region region | --aws-access-key accessKey | + --aws-secret-key secretKey] instanceName ``` -### Options -* `--docker-path` - Path to docker config.json file. -This is used to include private Docker Registry authentication for this Layer0 instance. -* `--module-source` - The source input variable is the path to the Terraform Layer0. -By default, this points to the Layer0 github repository. -Using values other than the default may result in undesired consequences. -* `--version` - The version input variable specifies the tag to use for the Layer0 -Docker images: `quintilesims/l0-api` and `quintilesims/l0-runner`. -* `--aws-access-key` - The access_key input variable is used to provision the AWS resources -required for Layer0. -This corresponds to the Access Key ID portion of an AWS Access Key. -It is recommended this key has the `AdministratorAccess` policy. -* `--aws-secret-key` The secret_key input variable is used to provision the AWS resources -required for Layer0.
-This corresponds to the Secret Access Key portion of an AWS Access Key. -It is recommended this key has the `AdministratorAccess` policy. -* `--aws-region` - The region input variable specifies which region to provision the -AWS resources required for Layer0. The following regions can be used: - - us-west-1 - - us-west-2 - - us-east-1 - - eu-west-1 - - -* `--aws-ssh-key-pair` - The ssh_key_pair input variable specifies the name of the -ssh key pair to include in EC2 instances provisioned by Layer0. -This key pair must already exist in the AWS account. -The names of existing key pairs can be found in the EC2 dashboard. +### Optional arguments +* `--docker-path` - Path to docker config.json file. This is used to include private Docker Registry authentication for this Layer0 instance. +* `--module-source` - The source input variable is the path to the Terraform Layer0. By default, this points to the Layer0 github repository. Using values other than the default may result in undesired consequences. +* `--version` - The version input variable specifies the tag to use for the Layer0 Docker images: `quintilesims/l0-api` and `quintilesims/l0-runner`. +* `--aws-ssh-key-pair` - The ssh_key_pair input variable specifies the name of the ssh key pair to include in EC2 instances provisioned by Layer0. This key pair must already exist in the AWS account. The names of existing key pairs can be found in the EC2 dashboard. +* `--aws-access-key` - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. +* `--aws-secret-key` - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. --- + ## Plan -The **plan** command is used to show the planned operation(s) to run during the next `apply` on a Layer0 instance without actually executing any actions +The `plan` command is used to show the planned operation(s) to run during the next `apply` on a Layer0 instance without actually executing any actions ### Usage ``` -$ l0-setup plan +l0-setup plan instanceName ``` -### Options -There are no options for this command - --- + ## Apply -The **apply** command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the layer0 configuration to an S3 bucket unless the `--push=false` flag is set to false. Pushing the configuration to an S3 bucket requires aws credentials which if not set via the optional `--aws-*` flags, are read from the environment variables or a credentials file. +The `apply` command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the layer0 configuration to an S3 bucket unless the `--push=false` flag is set to false. Pushing the configuration to an S3 bucket requires aws credentials which if not set via the optional `--aws-*` flags, are read from the environment variables or a credentials file. 
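For example, credentials can be supplied through environment variables before running `apply`; the sketch below assumes the standard AWS variable names and a hypothetical instance named `mylayer0`:

```
export AWS_ACCESS_KEY_ID="<access key id>"
export AWS_SECRET_ACCESS_KEY="<secret access key>"
l0-setup apply mylayer0
```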
### Usage ``` -$ l0-setup apply [options] +l0-setup apply [--quick | --push=false | --aws-access-key accessKey | + --aws-secret-key secretKey] instanceName ``` -### Options +### Optional arguments * `--quick` - Skips verification checks that normally run after `terraform apply` has completed -* `--push` - Skips uploading local Layer0 configuration files to an S3 bucket -* `--aws-access-key` - The Access Key ID portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-secret-key` - The Secret Access Key portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-region` - The region of the Layer0 instance. The default value is `us-west-2`. - +* `--push=false` - Skips uploading local Layer0 configuration files to an S3 bucket +* `--aws-access-key` - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. +* `--aws-secret-key` - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. --- + ## List -The **list** command is used to list local and remote Layer0 instances. +The `list` command is used to list local and remote Layer0 instances. ### Usage ``` -$ l0-setup list [options] +l0-setup list [--local=false | --remote=false | --aws-access-key accessKey | + --aws-secret-key secretKey] ``` -### Options +### Optional arguments * `-l, --local` - Show local Layer0 instances. This value is true by default. * `-r, --remote` - Show remote Layer0 instances. This value is true by default. -* `--aws-access-key` - The Access Key ID portion of an AWS Access Key that has permissions to list S3 buckets. -If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-secret-key` - The Secret Access Key portion of an AWS Access Key that has permissions to list S3 buckets. -If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-region` - The region to list S3 buckets. The default value is `us-west-2`. --- ## Push -The **push** command is used to back up your Layer0 configuration files to an S3 bucket. +The `push` command is used to back up your Layer0 configuration files to an S3 bucket. ### Usage ``` -$ l0-setup push [options] +l0-setup push [--aws-access-key accessKey | + --aws-secret-key secretKey] instanceName ``` -### Options -* `--aws-access-key` - The Access Key ID portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-secret-key` - The Secret Access Key portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-region` - The region of the Layer0 instance. The default value is `us-west-2`. 
+### Optional arguments +* `--aws-access-key` - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. +* `--aws-secret-key` - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. --- ## Pull -The **pull** command is used copy Layer0 configuration files from an S3 bucket. +The `pull` command is used copy Layer0 configuration files from an S3 bucket. ### Usage ``` -$ l0-setup pull [options] +l0-setup pull [--aws-access-key accessKey | + --aws-secret-key secretKey] instanceName ``` -### Options -* `--aws-access-key` - The Access Key ID portion of an AWS Access Key that has permissions to pull to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-secret-key` - The Secret Access Key portion of an AWS Access Key that has permissions to pull to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. -* `--aws-region` - The region of the Layer0 instance. The default value is `us-west-2`. +### Optional arguments +* `--aws-access-key` - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. +* `--aws-secret-key` - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the `AdministratorAccess` policy. --- ## Endpoint -The **endpoint** command is used to show environment variables used to connect to a Layer0 instance +The `endpoint` command is used to show environment variables used to connect to a Layer0 instance ### Usage ``` -$ l0-setup endpoint [options] +l0-setup endpoint [-i | -d | -s syntax] instanceName ``` -### Options +### Optional arguments * `-i, --insecure` - Show environment variables that allow for insecure settings * `-d, --dev` - Show environment variables that are required for local development * `-s --syntax` - Choose the syntax to display environment variables @@ -136,51 +122,49 @@ $ l0-setup endpoint [options] --- ## Destroy -The **destroy** command is used to destroy all resources associated with a Layer0 instance. +The `destroy` command is used to destroy all resources associated with a Layer0 instance. -!!! warning "Caution" - Destroying a Layer0 instance cannot be undone; if you created backups of your Layer0 configuration using the **push** command, those backups will also be deleted when you run the **destroy** command. +!!! danger "Caution" + Destroying a Layer0 instance cannot be undone. If you created backups of your Layer0 configuration using the `push` command, those backups will also be deleted when you run the `destroy` command. ### Usage ``` -$ l0-setup destroy [options] +l0-setup destroy [--force] instanceName ``` -### Options +### Optional arguments * `--force` - Skips confirmation prompt - --- ## Upgrade -The **upgrade** command is used to upgrade a Layer0 instance to a new version. -You will need to run an **apply** after this command has completed. 
+The `upgrade` command is used to upgrade a Layer0 instance to a new version. +You will need to run an `apply` after this command has completed. ### Usage ``` -$ l0-setup upgrade [options] +l0-setup upgrade [--force] instanceName version ``` -### Options +### Optional arguments * `--force` - Skips confirmation prompt - --- ## Set -The **set** command is used set input variable(s) for a Layer0 instance's Terraform module. -This command can be used to shorthand the **init** and **upgrade** commands, -and can also be used with custom Layer0 modules. -You will need to run an **apply** after this command has completed. +The `set` command is used set input variable(s) for a Layer0 instance's Terraform module. +This command can be used to shorthand the `init` and `upgrade` commands, and can also be used with custom Layer0 modules. +You will need to run an `apply` after this command has completed. ### Usage ``` -$ l0-setup set [options] +l0-setup set [--input key=value] instanceName ``` -**Example Usage** +### Options +* `--input key=val` - Specify an input using `key=val` format + +### Example Usage ``` -$ l0-setup set --input username=admin --input password=pass123 mylayer0 +l0-setup set --input username=admin --input password=pass123 mylayer0 ``` -### Options -* `--input` - Specify an input using `key=val` format diff --git a/docs-src/docs/reference/task_definition.md b/docs-src/docs/reference/task_definition.md index b35fc8fea..8a316188c 100644 --- a/docs-src/docs/reference/task_definition.md +++ b/docs-src/docs/reference/task_definition.md @@ -7,7 +7,7 @@ For more comprehensive documentation, we recommend taking a look at the official ## Sample -The following snippet contains the task definition for the [Guestbook](/guides/guestbook) application +The following snippet contains the task definition for the [Guestbook](../guides/walkthrough/deployment-1) application ``` { "AWSEBDockerrunVersion": 2, diff --git a/docs-src/docs/reference/terraform-plugin.md b/docs-src/docs/reference/terraform-plugin.md index 63231ef96..658323fdf 100644 --- a/docs-src/docs/reference/terraform-plugin.md +++ b/docs-src/docs/reference/terraform-plugin.md @@ -1,13 +1,13 @@ # Layer0 Terraform Provider Reference Terraform is an open-source tool for provisioning and managing infrastructure. -If you are new to Terraform, we recommend checking out their [documentation](https://www.Terraform.io/intro/index.html). +If you are new to Terraform, we recommend checking out their [documentation](https://www.terraform.io/intro/index.html). -Layer0 has built a custom [provider](https://www.Terraform.io/docs/providers/index.html) for Layer0. +Layer0 has built a custom [provider](https://www.terraform.io/docs/providers/index.html) for Layer0. This provider allows users to create, manage, and update Layer0 entities using Terraform. ## Prerequisites -- **Terraform v0.9.4+** ([download](https://www.Terraform.io/downloads.html)), accessible in your system path. +- **Terraform v0.11+** ([download](https://www.terraform.io/downloads.html)), accessible in your system path. ## Install Download a Layer0 v0.8.4+ [release](/releases). @@ -18,16 +18,18 @@ For further information, see Terraform's documentation on installing a Terraform ## Getting Started -* Checkout the `Terraform` section of the Guestbook walkthrough [here](/guides/guestbook#terraform). +* Checkout the `Terraform` section of the Guestbook walkthrough [here](../guides/walkthrough/deployment-1/#deploy-with-terraform). 
* We've added some tips and links to helpful resources in the [Best Practices](#best-practices) section below. --- -##Provider +## Provider + The Layer0 provider is used to interact with a Layer0 API. The provider needs to be configured with the proper credentials before it can be used. ### Example Usage + ``` # Add 'endpoint' and 'token' variables variable "endpoint" {} @@ -40,14 +42,13 @@ provider "layer0" { token = "${var.token}" skip_ssl_verify = true } - ``` ### Argument Reference The following arguments are supported: -!!! note "Configuration" - The `endpoint` and `token` variables for your layer0 api can be found using the [l0-setup endpoint](/reference/setup-cli/#endpoint) command +!!! note + The `endpoint` and `token` variables for your layer0 api can be found using the [l0-setup endpoint](setup-cli/#endpoint) command * `endpoint` - (Required) The endpoint of the layer0 api * `token` - (Required) The authentication token for the layer0 api @@ -59,6 +60,7 @@ The following arguments are supported: The API data source is used to extract useful read-only variables from the Layer0 API. ### Example Usage + ``` # Configure the api data source data "layer0_api" "config" {} @@ -70,6 +72,7 @@ output "vpc id" { ``` ### Attribute Reference + The following attributes are exported: * `prefix` - The prefix of the layer0 instance @@ -80,9 +83,11 @@ The following attributes are exported: --- ##Deploy Data Source + The Deploy data source is used to extract Layer0 Deploy attributes. ### Example Usage + ``` # Configure the deploy data source data "layer0_deploy" "dpl" { @@ -97,12 +102,14 @@ output "deploy_id" { ``` ### Argument Reference + The following arguments are supported: * `name` - (Required) The name of the deploy * `version` - (Required) The version of the deploy ### Attribute Reference + The following attributes are exported: * `name` - The name of the deploy @@ -111,10 +118,12 @@ The following attributes are exported: --- -##Environment Data Source +## Environment Data Source + The Environment data source is used to extract Layer0 Environment attributes. ### Example Usage + ``` # Configure the environment data source data "layer0_environment" "env" { @@ -128,11 +137,13 @@ output "environment_id" { ``` ### Argument Reference + The following arguments are supported: * `name` - (Required) The name of the environment ### Attribute Reference + The following attributes are exported: * `id` - The id of the environment @@ -144,10 +155,12 @@ The following attributes are exported: --- -##Load Balancer Data Source +## Load Balancer Data Source + The Load Balancer data source is used to extract Layer0 Load Balancer attributes. ### Example Usage + ``` # Configure the load balancer source data "layer0_load_balancer" "lb" { @@ -162,12 +175,14 @@ output "load_balancer_id" { ``` ### Argument Reference + The following arguments are supported: * `name` - (required) The name of the load balancer * `environment_id` - (required) The id of the environment the load balancer exists in ### Attribute Reference + The following attributes are exported: * `id` - The id of the load balancer @@ -179,10 +194,12 @@ The following attributes are exported: --- -##Service Data Source +## Service Data Source + The Service data source is used to extract Layer0 Service attributes. 
### Example Usage + ``` # Configure the service data source data "layer0_service" "svc" { @@ -197,12 +214,14 @@ output "service_id" { ``` ### Argument Reference + The following arguments are supported: * `name` - (required) The name of the service * `environment_id` - (required) The id of the environment the service exists in ### Attribute Reference + The following attributes are exported: * `id` - The id of the service @@ -213,13 +232,15 @@ The following attributes are exported: --- -##Deploy Resource +## Deploy Resource + Provides a Layer0 Deploy. Performing variable substitution inside of your deploy's json file (typically named `Dockerrun.aws.json`) can be done through Terraform's [template_file](https://www.terraform.io/docs/providers/template/). -For a working example, please see the sample [Guestbook](https://github.com/quintilesims/guides/blob/master/guestbook/layer0.tf) application +For a working example, please see the sample [Guestbook](https://github.com/quintilesims/guides/blob/master/guestbook/module/main.tf) application ### Example Usage + ``` # Configure the deploy template data "template_file" "guestbook" { @@ -237,12 +258,14 @@ resource "layer0_deploy" "guestbook" { ``` ### Argument Reference + The following arguments are supported: * `name` - (Required) The name of the deploy * `content` - (Required) The content of the deploy ### Attribute Reference + The following attributes are exported: * `id` - The id of the deploy @@ -256,6 +279,7 @@ The following attributes are exported: Provides a Layer0 Environment ### Example Usage + ``` # Create a new environment resource "layer0_environment" "demo" { @@ -269,6 +293,7 @@ resource "layer0_environment" "demo" { ``` ### Argument Reference + The following arguments are supported: * `name` - (Required) The name of the environment @@ -283,6 +308,7 @@ Options are "linux" or "windows". If not specified, Layer0 will use its default AMI ID for the specified operating system. ### Attribute Reference + The following attributes are exported: * `id` - The id of the environment @@ -299,6 +325,7 @@ The following attributes are exported: Provides a Layer0 Load Balancer ### Example Usage + ``` # Create a new load balancer resource "layer0_load_balancer" "guestbook" { @@ -330,13 +357,14 @@ resource "layer0_load_balancer" "guestbook" { ``` ### Argument Reference + The following arguments are supported: * `name` - (Required) The name of the load balancer * `environment` - (Required) The id of the environment to place the load balancer inside of * `private` - (Optional) If true, the load balancer will not be exposed to the public internet * `port` - (Optional, Default: 80:80/tcp) A list of port blocks. Ports documented below -* `health_check` - (Optional, Default: {"TCP:80" 30 5 2 2}) A health_check block. Health check documented below +* `health_check` - (Optional, Default: `{"TCP:80" 30 5 2 2}`) A health_check block. Health check documented below Ports (`port`) support the following: @@ -347,15 +375,16 @@ Ports (`port`) support the following: Healthcheck (`health_check`) supports the following: -* `target` - (Required) The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL values are: - * `HTTP`, `HTTPS` - PORT and PATH are required - * `TCP`, `SSL` - PORT is required, PATH is not supported +* `target` - (Required) The target of the check. 
Valid pattern is `PROTOCOL:PORT/PATH`, where `PROTOCOL` values are: + * `HTTP`, `HTTPS` - `PORT` and `PATH` are required + * `TCP`, `SSL` - `PORT` is required, `PATH` is not supported * `interval` - (Required) The interval between checks. * `timeout` - (Required) The length of time before the check times out. * `healthy_threshold` - (Required) The number of checks before the instance is declared healthy. * `unhealthy_threshold` - (Required) The number of checks before the instance is declared unhealthy. ### Attribute Reference + The following attributes are exported: * `id` - The id of the load balancer @@ -371,6 +400,7 @@ The following attributes are exported: Provides a Layer0 Service ### Example Usage + ``` # Create a new service resource "layer0_service" "guestbook" { @@ -383,6 +413,7 @@ resource "layer0_service" "guestbook" { ``` ### Argument Reference + The following arguments are supported: * `name` - (Required) The name of the service @@ -407,10 +438,10 @@ The following attributes are exported: * Always run `Terraform plan` before `terraform apply`. This will show you what action(s) Terraform plans to make before actually executing them. -* Use [variables](https://www.Terraform.io/intro/getting-started/variables.html) to reference secrets. -Secrets can be placed in a file named `Terraform.tfvars`, or by setting `TF_VAR_*` environment variables. -More information can be found [here](https://www.Terraform.io/intro/getting-started/variables.html). +* Use [variables](https://www.terraform.io/intro/getting-started/variables.html) to reference secrets. +Secrets can be placed in a file named `terraform.tfvars`, or by setting `TF_VAR_*` environment variables. +More information can be found [here](https://www.terraform.io/intro/getting-started/variables.html). * Use Terraform's `remote` command to backup and sync your `terraform.tfstate` file across different members in your organization. -Terraform has documentation for using S3 as a backend [here](https://www.Terraform.io/docs/state/remote/s3.html). -* Terraform [modules](https://www.Terraform.io/intro/getting-started/modules.html) allow you to define and consume reusable components. +Terraform has documentation for using S3 as a backend [here](https://www.terraform.io/docs/backends/types/s3.html). +* Terraform [modules](https://www.terraform.io/intro/getting-started/modules.html) allow you to define and consume reusable components. * Example configurations can be found [here](https://github.com/hashicorp/Terraform/tree/master/examples) diff --git a/docs-src/docs/reference/updateservice.md b/docs-src/docs/reference/updateservice.md index 1f2f9bdce..0547b3064 100644 --- a/docs-src/docs/reference/updateservice.md +++ b/docs-src/docs/reference/updateservice.md @@ -11,30 +11,89 @@ The disadvantage of using this method is that you cannot perform A/B testing of **To replace a Deploy to refer to a new task definition:** -1. At the command line, type the following to create a new Deploy:
```l0 deploy create [pathToTaskDefinition] [deployName]```
Note that if ```[deployName]``` already exists, this step will create a new version of that Deploy. -2. Type the following to update the existing Service:
```l0 service update [existingServiceName] [deployName]```
By default, the Service you specify in this command will refer to the latest version of ```[deployName]```, if multiple versions of the Deploy exist.

Note


If you want to refer to a specific version of the Deploy, type the following command instead of the one shown above: l0 service update [serviceName] [deployName]:[deployVersion]

+At the command line, type the following to create a new Deploy: + +``` +l0 deploy create taskDefPath deployName +``` + +`taskDefPath` is the path to the ECS Task Definition. Note that if `deployName` already exists, this step will create a new version of that Deploy. + +Use [l0 service update](cli/#service-update) to update the existing service: + +``` +l0 service update serviceName deployName[:deployVersion] +``` + +By default, the service name you specify in this command will refer to the latest version of `deployName`. You can optionally specify a specific version of the deploy, as shown above. ## Method 2: Create a new Deploy and Service using the same Loadbalancer -This method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. This method also you allows you to re-scale the service if necessary, using the ```l0 service scale``` command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates. +This method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. This method also allows you to re-scale the service if necessary, using the [l0 service scale](cli/#service-scale) command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates. The disadvantage of using this method is that you cannot control the routing of traffic between the old and new versions of the application. **To create a new Deploy and Service:** -1. At the command line, type the following to create a new Deploy (or a new version of the Deploy, if ```[deployName]``` already exists):
```l0 deploy create [pathToTaskDefinition] [deployName]``` -2. Type the following command to create a new Service that refers to ```[deployName]``` behind an existing Loadbalancer named ```[loadbalancerName]```:
```l0 service create --loadbalancer [loadbalancerName] [environmentName] [deployName]``` -3. Check to make sure that the new Service is working as expected. If it is, and you do not want to keep the old Service, type the following command to delete the old Service: ```l0 service delete [oldServiceName]``` +At the command line, type the following to create a new deploy or a new version of a deploy: + +``` +l0 deploy create taskDefPath deployName +``` + +`taskDefPath` is the path to the ECS Task Definition. Note that if `deployName` already exists, this step will create a new version of that Deploy. + +Use [l0 service create](cli/#service-create) to create a new service that uses `deployName` behind an existing load balancer named `loadBalancerName` + +``` +l0 service create --loadbalancer [environmentName:]loadBalancerName environmentName serviceName deployName[:deployVersion] +``` + +By default, the service name you specify in this command will refer to the latest version of `deployName`. You can optionally specify a specific version of the deploy, as shown above. You can also optionally specify the name of the environment, `environmentName` where the load balancer exists. + +Check to make sure that the new service is working as expected. If it is, and you do not want to keep the old service, delete the old service: + +``` +l0 service delete service +``` ## Method 3: Create a new Deploy, Loadbalancer and Service -The final method of updating a Layer0 service is to create an entirely new Deploy, Loadbalancer and Service. This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services. +The final method of updating a Layer0 service is to create an entirely new Deploy, Load Balancer and Service. This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services. + +The disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Load Balancer. + +**To create a new Deploy, Load Balancer and Service:** + +Type the following command to create a new Deploy: + +``` +l0 deploy create taskDefPath deployName +``` + +`taskDefPath` is the path to the ECS Task Definition. Note that if `deployName` already exists, this step will create a new version of that Deploy. + +Use [l0 loadbalancer create](cli/#loadbalancer-create) to create a new Load Balancer: + +``` +l0 loadbalancer create --port port environmentName loadBalancerName deployName +``` + +* `port` is the port configuration for the listener of the Load Balancer. Valid pattern is `hostPort:containerPort/protocol`. Multiple ports can be specified using `--port port1 --port port2 ...`. + * `hostPort` - The port that the load balancer will listen for traffic on. + * `containerPort` - The port that the load balancer will forward traffic to. + * `protocol` - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS). + +!!! note + The value of `loadbalancerName` in the above command must be unique to the Environment. + +Use [l0 service create](cli/#service-create) to create a new Service using the Load Balancer you just created: -The disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Loadbalancer. 
+``` +l0 service create --loadbalancer loadBalancerName environmentName serviceName deployName +``` -**To create a new Deploy, Loadbalancer and Service:** +!!! note + The value of `serviceName` in the above command must be unique to the Environment. -1. At the command line, type the following command to create a new Deploy:
```l0 deploy create [pathToTaskDefinition] [deployName]``` -2. Type the following command to create a new Loadbalancer:
```l0 loadbalancer create --port [portNumber] [environmentName] [loadbalancerName] [deployName]```

Note


The value of [loadbalancerName] in the above command must be unique.

-3. Type the following command to create a new Service:
```l0 service create --loadbalancer [loadBalancerName] [environmentName] [serviceName] [deployName]```

Note


The value of [serviceName] in the above command must be unique.

-4. Implement a method of routing traffic between the old and new Services, such as [HAProxy](http://www.haproxy.org) or [Consul](https://www.consul.io). +Implement a method of routing traffic between the old and new Services, such as [HAProxy](http://www.haproxy.org) or [Consul](https://www.consul.io). diff --git a/docs-src/docs/releases.md b/docs-src/docs/releases.md index c2d77bfe3..73411286b 100644 --- a/docs-src/docs/releases.md +++ b/docs-src/docs/releases.md @@ -1,5 +1,6 @@ | Version | macOS | Linux | Windows | | - | - | - | - | +| v0.10.4 | [macOS](https://s3.amazonaws.com/xfra-layer0/release/v0.10.4/layer0_v0.10.4_darwin.zip) | [Linux](https://s3.amazonaws.com/xfra-layer0/release/v0.10.4/layer0_v0.10.4_linux.zip) | [Windows](https://s3.amazonaws.com/xfra-layer0/release/v0.10.4/layer0_v0.10.4_windows.zip) | v0.10.3 | [macOS](https://s3.amazonaws.com/xfra-layer0/release/v0.10.3/layer0_v0.10.3_darwin.zip) | [Linux](https://s3.amazonaws.com/xfra-layer0/release/v0.10.3/layer0_v0.10.3_linux.zip) | [Windows](https://s3.amazonaws.com/xfra-layer0/release/v0.10.3/layer0_v0.10.3_windows.zip) | v0.10.2 | [macOS](https://s3.amazonaws.com/xfra-layer0/release/v0.10.2/layer0_v0.10.2_darwin.zip) | [Linux](https://s3.amazonaws.com/xfra-layer0/release/v0.10.2/layer0_v0.10.2_linux.zip) | [Windows](https://s3.amazonaws.com/xfra-layer0/release/v0.10.2/layer0_v0.10.2_windows.zip) | v0.10.1 | [macOS](https://s3.amazonaws.com/xfra-layer0/release/v0.10.1/layer0_v0.10.1_darwin.zip) | [Linux](https://s3.amazonaws.com/xfra-layer0/release/v0.10.1/layer0_v0.10.1_linux.zip) | [Windows](https://s3.amazonaws.com/xfra-layer0/release/v0.10.1/layer0_v0.10.1_windows.zip) diff --git a/docs-src/docs/setup/destroy.md b/docs-src/docs/setup/destroy.md index f67b77b79..299123d35 100644 --- a/docs-src/docs/setup/destroy.md +++ b/docs-src/docs/setup/destroy.md @@ -6,7 +6,7 @@ This section provides procedures for destroying (deleting) a Layer0 instance. In order to destroy a Layer0 instance, you must first delete all environments in the instance. List all environments with: ``` -$ l0 environment list +l0 environment list ``` For each environment listed in the previous step, with the exception of the environment named `api`, @@ -20,9 +20,10 @@ l0 environment delete --wait Once all environments have been deleted, the Layer0 instance can be deleted using the `l0-setup` tool. Run the following command (replacing `` with the name of the Layer0 instance): ``` -$ l0-setup destroy +l0-setup destroy ``` -The **destroy** command is idempotent; if it fails, it is safe to re-attempt multiple times. -If the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources `l0-setup` is attempting to destroy. -You will need to manually remove these dependencies in order to get the **destroy** command to complete successfully. +The `destroy` command is idempotent; if it fails, it is safe to re-attempt multiple times. + +!!! note + If the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources `l0-setup` is attempting to destroy. You will need to manually remove these dependencies in order to get the `destroy` command to complete successfully. 
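Putting the destroy steps together, a complete teardown might look like the following sketch (the environment and instance names are hypothetical):

```
l0 environment list
l0 environment delete --wait demo-env
l0-setup destroy mylayer0
```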
diff --git a/docs-src/docs/setup/install.md b/docs-src/docs/setup/install.md index 67892097b..3dfee8cdc 100644 --- a/docs-src/docs/setup/install.md +++ b/docs-src/docs/setup/install.md @@ -4,15 +4,15 @@ Before you can install and configure Layer0, you must obtain the following: -* **An AWS account.** +* **Access to an AWS account** -* **An EC2 Key Pair.** +* **An EC2 Key Pair** This key pair allows you to access the EC2 instances running your Services using SSH. If you have already created a key pair, you can use it for this process. -Otherwise, follow the [instructions at aws.amazon.com](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#having-ec2-create-your-key-pair) to create a new key pair. +Otherwise, [follow the AWS documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#having-ec2-create-your-key-pair) to create a new key pair. Make a note of the name that you selected when creating the key pair. -* **Terraform v0.9.4+** +* **Terraform v0.11+** We use Terraform to create the resources that Layer0 needs. If you're unfamiliar with Terraform, you may want to check out our [introduction](/reference/terraform_introduction). If you're ready to install Terraform, there are instructions in the [Terraform documentation](https://www.terraform.io/intro/getting-started/install.html). @@ -20,7 +20,7 @@ If you're ready to install Terraform, there are instructions in the [Terraform d ## Part 1: Download and extract Layer0 1. In the [Downloads section of the home page](/index.html#download), select the appropriate installation file for your operating system. Extract the zip file to a directory on your computer. -2. (Optional) Place the **l0** and **l0-setup** binaries into your system path. +2. (Optional) Place the `l0` and `l0-setup` binaries into your system path. For more information about adding directories to your system path, see the following resources: * (Windows): [How to Edit Your System PATH for Easy Command Line Access in Windows](http://www.howtogeek.com/118594/how-to-edit-your-system-path-for-easy-command-line-access/) * (Linux/macOS): [Adding a Directory to the Path](http://www.troubleshooters.com/linux/prepostpath.htm) @@ -33,25 +33,34 @@ You will use the credentials created in this section when creating, updating, or 1. In a web browser, login to the [AWS Console](http://console.aws.amazon.com/). -2. Under **Security and Identity**, click **Identity and Access Management**. +2. Click the **Services** dropdown menu in the upper left portion of the console page, then type **IAM** in the text box that appears at the top of the page after you click **Services**. As you type IAM, a search result will appear below the text box. Click on the IAM service result that appears below the text box. -3. Click **Groups**, and then click **Administrators**.

Note


If the **Administrators** group does not already exist, complete the following steps:

  1. Click **Create New Group**. Name the new group "Administrators", and then click **Next Step**.
  2. Click **AdministratorAccess** to attach the Administrator policy to your new group.
  3. Click **Next Step**, and then click **Create Group**.
+3. In the left panel, click **Groups**, and then confirm that you have a group called **Administrators**. -4. Click **Users**. +!!! question "Is the Administrators group missing in your AWS account?" + If the **Administrators** group does not already exist, complete the following steps: + + * Click **Create New Group**. Name the new group **Administrators**, and then click **Next Step**. + + * Check the **AdministratorAccess** policy to attach the Administrator policy to your new group. + + * Click **Next Step**, and then click **Create Group**. -5. Click **Create New Users** and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to **Generate an Access Key for each user**, and then click **Create**. +4. In the left panel, click **Users**. -6. Once your user account has been created, click **Download Credentials** to save your access key to a CSV file. +5. Click the **New User** button and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to **Programmatic access**, and then click the **Next: Permissions** button. -7. In the Users list, click the user account you just created. Under **User Actions**, click **Add User to Groups**. +6. Make sure the **Add user to group** button is highlighted. Find and check the box next to the group **Administrators**. Click **Next: Review** button to continue. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe! -8. Select the group **Administrators** and click **Add to Groups**. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe! +7. Review your choices and then click the **Create user** button. + +8. Once your user account has been created, click the **Download .csv** button to save your access and secret key to a CSV file. ## Part 3: Create a new Layer0 Instance Now that you have downloaded Layer0 and configured your AWS account, you can create your Layer0 instance. From a command prompt, run the following (replacing `` with a name for your Layer0 instance): ``` -$ l0-setup init +l0-setup init ``` This command will prompt you for many different inputs. @@ -92,21 +101,20 @@ Please enter a value and press 'enter'. ... ``` -Once the **init** command has successfully completed, you're ready to actually create the resources needed to use Layer0. +Once the `init` command has successfully completed, you're ready to actually create the resources needed to use Layer0. Run the following command (again, replace `` with the name you've chosen for your Layer0 instance): ``` l0-setup apply ``` -The first time you run the **apply** command, it may take around 5 minutes to complete. +The first time you run the `apply` command, it may take around 5 minutes to complete. This command is idempotent; it is safe to run multiple times if it fails the first. -It's a good idea to run the **push** command (`l0-setup push `) after **apply** commands complete. -This will send a backup of your Layer0 instance's configuration and state to S3. -These files can be grabbed later using the **pull** command (`l0-setup pull `). +At the end of the `apply` command, your Layer0 instance's configuration and state will be automatically backed up to an S3 bucket. You can manually back up your configuration at any time using the `push` command. 
It's a good idea to run this command regularly (`l0-setup push `) to ensure that your configuration is backed up. +These files can be downloaded at any time using the `pull` command (`l0-setup pull `). -!!! note "Using a Private Docker Registry" +!!! info "Using a Private Docker Registry" **The procedures in this section are optional, but are highly recommended for production use.** If you require authentication to a private Docker registry, you will need a Docker configuration file present on your machine with access to private repositories (typically located at `~/.docker/config.json`). @@ -116,31 +124,31 @@ A configuration file will be generated at `~/.docker/config.json`. To add this authentication to your Layer0 instance, run: ``` -$ l0-setup init --docker-path= +l0-setup init --docker-path= ``` -This will add a rendered file into your Layer0 instance's directory at `~/.layer0//dockercfg.json`. +This will reconfigure your Layer0 configuration and add a rendered file into your Layer0 instance's directory at `~/.layer0//dockercfg.json`. -You can modify a Layer0 instance's `dockercfg.json` file and re-run the **apply** command (`l0-setup apply `) to make changes to your authentication. -Note that any EC2 instances created prior to changing your `dockercfg.json` file will need to be manually terminated since they only grab the authentication file during instance creation. +You can modify a Layer0 instance's `dockercfg.json` file and re-run the `apply` command (`l0-setup apply `) to make changes to your authentication. +**Note:** Any EC2 instances created prior to changing your `dockercfg.json` file will need to be manually terminated since they only grab the authentication file during instance creation. Terminated EC2 instances will be automatically re-created by autoscaling. -!!! note "Using an Existing VPC" - **The procedures in this section must be followed to properly install Layer0 into an existing VPC** +!!! warning "Using an Existing VPC" + **The procedures in this section must be followed precisely to properly install Layer0 into an existing VPC** -By default, l0-setup creates a new VPC to place resources. -However, l0-setup can place resources in an existing VPC if it meets the following conditions: +By default, `l0-setup` creates a new VPC to place resources. +However, `l0-setup` can place resources in an existing VPC if the VPC meets all of the following conditions: * Has access to the public internet (through a NAT instance or gateway) * Has at least 1 public and 1 private subnet * The public and private subnets have the tag `Tier: Public` or `Tier: Private`, respectively. For information on how to tag AWS resources, please visit the [AWS documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). -Once you are sure the existing VPC satisfies these requirements, run the **init** command, +Once you are sure the existing VPC satisfies these requirements, run the `init` command, placing the VPC ID when prompted: ``` -$ l0-setup init +l0-setup init ... VPC ID (optional): The vpc_id input variable specifies an existing AWS VPC to provision the AWS resources required for Layer0. If no input is specified, a new VPC will be @@ -157,25 +165,26 @@ Please enter a new value, or press 'enter' to keep the current value. Input: vpc123 ``` -Once the command has completed, it is safe to run **apply** to provision the resources. +Once the command has completed, it is safe to run [apply](../../reference/setup-cli#apply) to provision the resources. 
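If the subnets in your existing VPC are not yet tagged, the required tags can be added with the AWS CLI before running `init` (the subnet IDs below are placeholders):

```
aws ec2 create-tags --resources subnet-0aaa1111 --tags Key=Tier,Value=Public
aws ec2 create-tags --resources subnet-0bbb2222 --tags Key=Tier,Value=Private
```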
## Part 4: Connect to a Layer0 Instance -Once the **apply** command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the **endpoint** command. +Once the `apply` command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the `endpoint` command. ``` -$ l0-setup endpoint --insecure +l0-setup endpoint --insecure export LAYER0_API_ENDPOINT="https://l0-instance_name-api-123456.us-west-2.elb.amazonaws.com" export LAYER0_AUTH_TOKEN="abcDEFG123" export LAYER0_SKIP_SSL_VERIFY="1" export LAYER0_SKIP_VERSION_VERIFY="1" ``` -The **--insecure** flag shows configurations that bypass SSL and version verifications. -This is required as the Layer0 API created uses a self-signed certificate by default. -These settings are **not** recommended for production use! +!!! danger + The `--insecure` flag shows configurations that bypass SSL and version verifications. + This is required as the Layer0 API created uses a self-signed SSL certificate by default. + These settings are **not** recommended for production use! -The **endpoint** command supports a `--syntax` option, which can be used to turn configuration into a single line: +The `endpoint` command supports a `--syntax` option, which can be used to turn configuration into a single line: -* Bash (default) - `$ eval "$(l0-setup endpoint --insecure )"` -* Powershell - `$ l0-setup endpoint --insecure --syntax=powershell | Out-String | Invoke-Expression` +* Bash (default) - `eval "$(l0-setup endpoint --insecure )"` +* Powershell - `l0-setup endpoint --insecure --syntax=powershell | Out-String | Invoke-Expression` diff --git a/docs-src/docs/setup/upgrade.md b/docs-src/docs/setup/upgrade.md index fd5dfe260..2ba33563a 100644 --- a/docs-src/docs/setup/upgrade.md +++ b/docs-src/docs/setup/upgrade.md @@ -3,22 +3,23 @@ This section provides procedures for upgrading your Layer0 installation to the latest version. This assumes you are using Layer0 version `v0.10.0` or later. -!!! note +!!! warning Layer0 does not support updating MAJOR or MINOR versions in place unless explicitly stated otherwise. - Users will need to destroy and re-create Layer0 instances in these circumstances. + Users will either need to create a new Layer0 instance and migrate to it or destroy and re-create their Layer0 instance in these circumstances. -Run the **upgrade** command, replacing `` and `` with the name of the Layer0 instance and new version, respectively: +Run the `upgrade` command, replacing `` and `` with the name of the Layer0 instance and new version, respectively: ``` -$ l0-setup upgrade +l0-setup upgrade ``` This will prompt you about the updated `source` and `version` inputs changing. If you are not satisfied with the changes, exit the application during the prompts. -For full control on changing inputs, please use the **set** command. +For full control on changing inputs, use the [set](../../reference/setup-cli#set) command. **Example Usage** ``` -$ l0-setup upgrade mylayer0 v0.10.1 +l0-setup upgrade mylayer0 v0.10.1 + This will update the 'version' input From: [v0.10.0] To: [v0.10.1] @@ -34,5 +35,5 @@ This will update the 'source' input Everything looks good! You are now ready to run 'l0-setup apply mylayer0' ``` -As stated by the command output, run the **apply** command to apply the changes to the Layer0 instance. +As stated by the command output, run the [apply](../../reference/setup-cli#apply) command to apply the changes to the Layer0 instance. 
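As a quick check that the instance is reachable after `apply` completes, you can load the connection settings into your shell and confirm they are set. A minimal sketch, assuming a Bash shell and a hypothetical instance named `mylayer0`:

```
# Export the settings printed by 'l0-setup endpoint' into the current shell.
eval "$(l0-setup endpoint --insecure mylayer0)"

# Empty output here means the environment variables were not exported.
echo "$LAYER0_API_ENDPOINT"
echo "$LAYER0_AUTH_TOKEN"
```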
If any errors occur, please contact the Layer0 team. diff --git a/docs-src/docs/stylesheets/extra.css b/docs-src/docs/stylesheets/extra.css index 2a527eea4..118c00da0 100644 --- a/docs-src/docs/stylesheets/extra.css +++ b/docs-src/docs/stylesheets/extra.css @@ -1,15 +1,20 @@ + +:root { + --theme-color:#e84e40; +} + .article h2 { font-size:18pt; } .article h3 { - color: #e84e40; + color: var(--theme-color); font-size:14pt; font-weight:normal; } .article h4 { - color:#e84e40; + color: var(--theme-color); font-weight:bold; font-style:normal; } @@ -64,10 +69,6 @@ color: #ddd; } -.admonition.note a { - color: #7ff; -} - .article a.note { color:#ffffff; border-bottom:1px dotted; @@ -75,5 +76,71 @@ .article a.note:hover { border-bottom: 1px solid; - color:#e84e40; + color: var(--theme-color); +} + +.md-nav__link:hover { + color: var(--theme-color); +} + +.md-typeset h2[id] .headerlink:focus, .md-typeset h2[id]:hover .headerlink:hover, .md-typeset h2[id]:target .headerlink { + color: var(--theme-color); +} + +/* Change Note Box Colors */ +.md-typeset .admonition, .md-typeset details { + -webkit-box-shadow: 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12), 0 3px 1px -2px rgba(0,0,0,.2); + box-shadow: 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12), 0 3px 1px -2px rgba(0,0,0,.2); + position: relative; + margin: 1.5625em 0; + padding: 0 1.2rem; + border-left: .4rem solid var(--theme-color); + border-radius: .2rem; + font-size: 1.28rem; + overflow: auto; +} + +.md-typeset .admonition>.admonition-title, .md-typeset .admonition>summary, .md-typeset details>.admonition-title, .md-typeset details>summary { + margin: 0 -1.2rem; + padding: .8rem 1.2rem .8rem 4rem; + border-bottom: .1rem solid rgba(68,138,255,.1); + background-color: #e84e404d; + font-weight: 700; +} + +.md-typeset .admonition>.admonition-title:before, .md-typeset .admonition>summary:before, .md-typeset details>.admonition-title:before, .md-typeset details>summary:before { + position: absolute; + left: 1.2rem; + color: var(--theme-color); + font-size: 2rem; + content: "\E3C9"; +} + +/* Code Block */ +.md-typeset code, .md-typeset pre { + background-color: rgba(237, 237, 237, 0.226); + color: #0a1c25; + font-size: 85%; +} + +/* Logo Header */ +.md-header-nav__button.md-logo img { + width: 42px; + height: 42px; +} + +a.md-content__icon, .md-footer-nav__button, .md-header-nav__button.md-logo, .md-nav__button, .md-search-result__article--document:before { + display: block; + margin: .4rem; + padding: 0.2rem; + font-size: 2.4rem; + cursor: pointer; +} + +/* Title Size */ +.md-header-nav__title { + padding: 0; + display: block; + font-size: 2.0rem; + line-height: 4.8rem; } diff --git a/docs-src/docs/troubleshooting/commonissues.md b/docs-src/docs/troubleshooting/commonissues.md index 556e1382a..f86ac7fef 100644 --- a/docs-src/docs/troubleshooting/commonissues.md +++ b/docs-src/docs/troubleshooting/commonissues.md @@ -2,22 +2,29 @@ ##"Connection refused" error when executing Layer0 commands -When executing commands using the Layer0 CLI, you may see the following error message: "Get http://localhost:9090/_command_/: dial tcp 127.0.0.1:9090: connection refused", where _command_ is the Layer0 command you are trying to execute. +When executing commands using the Layer0 CLI, you may see the following error message: -This error indicates that your Layer0 environment variables have not been set for the current session. 
See the ["Configure environment variables" section](http://localhost:8000/setup/install/#part-4-configure-environment-variables) of the Layer0 installation guide for instructions for setting up your environment variables. +`Get http://localhost:9090/command/: dial tcp 127.0.0.1:9090: connection refused` + +Where `command` is the Layer0 command you are trying to execute. + +This error indicates that your Layer0 environment variables have not been set for the current session. See the ["Connect to a Layer0 Instance" section](../setup/install/#part-4-connect-to-a-layer0-instance) of the Layer0 installation guide for instructions for setting up your environment variables. --- -##"Invalid Dockerrun.aws.json" error when creating a deploy -###Byte Order Marks (BOM) in Dockerrun file +## "Invalid Dockerrun.aws.json" error when creating a deploy +### Byte Order Marks (BOM) in Dockerrun file If your Dockerrun.aws.json file contains a Byte Order Marker, you may receive an "Invalid Dockerrun.aws.json" error when creating a deploy. If you create or edit the Dockerrun file using Visual Studio, and you have not modified the file encoding settings in Visual Studio, you are likely to encounter this error. **To remove the BOM:** * At the command line, type the following to remove the BOM: - * (Linux/OS X) **tail -c +4** _DockerrunFile_ **>** _DockerrunFileNew_ -

Replace _DockerrunFile_ with the path to your Dockerrun file, and _DockerrunFileNew_ with a new name for the Dockerrun file without the BOM. + * (Linux/OS X) + + `tail -c +4 DockerrunFile > DockerrunFileNew` + + Replace `DockerrunFile` with the path to your Dockerrun file, and `DockerrunFileNew` with a new name for the Dockerrun file without the BOM. Alternatively, you can use the [dos2unix file converter](https://sourceforge.net/projects/dos2unix/) to remove the BOM from your Dockerrun files. Dos2unix is available for Windows, Linux and Mac OS. @@ -25,19 +32,23 @@ Alternatively, you can use the [dos2unix file converter](https://sourceforge.net * At the command line, type the following: - * **dos2unix --remove-bom -n** _DockerrunFile_ _DockerrunFileNew_ -

Replace _DockerrunFile_ with the path to your Dockerrun file, and _DockerrunFileNew_ with a new name for the Dockerrun file without the BOM. +``` +dos2unix --remove-bom -n DockerrunFile DockerrunFileNew +``` + +Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM. --- -##"AWS Error: the key pair '' does not exist (code 'ValidationError')" with l0-setup +## "AWS Error: the key pair '' does not exist (code 'ValidationError')" with l0-setup + +This occurs when you pass an invalid EC2 keypair to l0-setup. To fix this, follow the instructions for [creating an EC2 Key Pair](../setup/install/#part-2-create-an-access-key). -This occurs when you pass a non-existent EC2 keypair to l0-setup. To fix this, follow the instructions for [creating an EC2 Key Pair](/install/#part-2-create-an-access-key). +1. After you've created a new EC2 Key Pair, use [l0-setup init](reference/setup-cli/#init) to reconfigure your instance: -1. After you've created a new EC2 Key Pair, run the following command: -
    -
    * **l0-setup plan** *prefix* **-var key_pair**=*keypair*
    -
+``` +l0-setup init --aws-ssh-key-pair keypair +``` - - - - + - - + {% block site_meta %} + + + + {% if page and page.meta and page.meta.description %} + + {% elif config.site_description %} + + {% endif %} + {% if page.canonical_url %} + + {% endif %} + {% if page and page.meta and page.meta.author %} + + {% elif config.site_author %} + + {% endif %} + {% for key in [ + "clipboard.copy", + "clipboard.copied", + "search.language", + "search.result.none", + "search.result.one", + "search.result.other", + "search.tokenizer" + ] %} + + {% endfor %} + + + {% endblock %} {% block htmltitle %} - {% if page_title %} - {{ page_title }} - {{ site_name }} - {% elif page_description %} - {{ site_name }} - {{ page_description }} + {% if page and page.meta and page.meta.title %} + {{ page.meta.title }} + {% elif page and page.title and not page.is_homepage %} + {{ page.title }} - {{ config.site_name }} {% else %} - {{ site_name }} + {{ config.site_name }} {% endif %} - {% if page_description %} - - {% endif %} - {% if canonical_url %} - + {% endblock %} + {% block styles %} + + {% if palette.primary or palette.accent %} + {% endif %} - {% if site_author %} - + {% endblock %} + {% block libs %} + + {% endblock %} + {% block fonts %} + {% if font != false %} + + {% endif %} + {% endblock %} - - - - - - - {% if config.extra.logo %} - - {% endif %} - {% set icon = icon | default("assets/images/favicon-e565ddfa3b.ico") %} - - - - - {% if config.extra.palette %} - - {% endif %} - {% if config.extra.font != "none" %} - {% set text = config.extra.get("font", {}).text | default("Ubuntu") %} - {% set code = config.extra.get("font", {}).code | default("Ubuntu Mono") %} - {% set font = text + ':400,700|' + code | replace(' ', '+') %} - - - {% endif %} {% for path in extra_css %} {% endfor %} - {% block extrahead %}{% endblock %} - {% set palette = config.extra.get("palette", {}) %} - {% set primary = palette.primary | replace(' ', '-') | lower %} - {% set accent = palette.accent | replace(' ', '-') | lower %} - - {% if repo_name == "GitHub" and repo_url %} - {% set repo_id = repo_url | replace("https://github.com/", "") %} - {% if repo_id[-1:] == "/" %} - {% set repo_id = repo_id[:-1] %} - {% endif %} + {% if palette.primary or palette.accent %} + {% set primary = palette.primary | replace(" ", "-") | lower %} + {% set accent = palette.accent | replace(" ", "-") | lower %} + {% if palette.primary and palette.accent %} + + {% elif palette.primary %} + + {% elif palette.accent %} + {% endif %} -
-
-
- - - -
- {% include "header.html" %} -
-
- {% set h1 = "\x3ch1 id=" in content %} -
- {% include "drawer.html" %} -
-
-
- {% if not h1 %} -

{{ page_title | default(site_name, true)}}

- {% endif %} - {{ content }} - - {% block footer %} -
- {% include "footer.html" %} -
{% endblock %} -
-
-
-
-
-
-
+
+
+ {% block content %} + {% if page.edit_url %} + + {% endif %} + {% if not "\x3ch1" in page.content %} +

{{ page.title | default(config.site_name, true)}}

+ {% endif %} + {{ page.content }} + {% block source %} + {% if page and page.meta and page.meta.source %} +

{{ lang.t("meta.source") }}

+ {% set path = page.meta.path | default([""]) %} + {% set file = page.meta.source %} + + {{ file }} + + {% endif %} + {% endblock %} + {% endblock %} + {% block disqus %} + {% if config.extra.disqus and not page.is_homepage %} +

{{ lang.t("meta.comments") }}

+ {% include "partials/integrations/disqus.html" %} + {% endif %} + {% endblock %} +
-
-
- - - {% for path in extra_javascript %} - - {% endfor %} - {% if google_analytics %} - - {% endif %} + + {% block footer %} + {% include "partials/footer.html" %} + {% endblock %} + + {% block scripts %} + + {% if lang.t("search.language") != "en" %} + {% set languages = lang.t("search.language").split(",") %} + {% if languages | length and languages[0] != "" %} + {% set path = base_url + "/assets/javascripts/lunr" %} + + {% for language in languages | map("trim") %} + {% if language != "en" %} + {% if language == "jp" %} + + {% endif %} + + {% endif %} + {% endfor %} + {% if languages | length > 1 %} + + {% endif %} + {% endif %} + {% endif %} + + {% for path in extra_javascript %} + + {% endfor %} + {% endblock %} + {% block analytics %} + {% if config.google_analytics %} + {% include "partials/integrations/analytics.html" %} + {% endif %} + {% endblock %} - \ No newline at end of file + diff --git a/docs-src/material/main.html b/docs-src/material/main.html new file mode 100644 index 000000000..94d9808cc --- /dev/null +++ b/docs-src/material/main.html @@ -0,0 +1 @@ +{% extends "base.html" %} diff --git a/docs-src/material/mkdocs_theme.yml b/docs-src/material/mkdocs_theme.yml new file mode 100644 index 000000000..c7e3e28c3 --- /dev/null +++ b/docs-src/material/mkdocs_theme.yml @@ -0,0 +1,70 @@ +# Copyright (c) 2016-2017 Martin Donath + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +# Language for theme localization +language: en + +# Feature flags for functionality that alters behavior significantly, and thus +# may be a matter of taste +feature: + + # Another layer on top of the main navigation for larger screens in the form + # of tabs, especially useful for larger documentation projects + tabs: false + +# Sets the primary and accent color palettes as defined in the Material Design +# documentation - possible values can be looked up in the getting started guide +palette: + + # Primary color used for header, sidebar and links, default: indigo + primary: + + # Accent color for highlighting user interaction, default: indigo + accent: + +# Fonts used by Material, automatically loaded from Google Fonts - see the site +# for a list of available fonts +font: + + # Default font for text + text: Roboto + + # Fixed-width font for code listings + code: Roboto Mono + +# Favicon to be rendered +favicon: assets/images/favicon.png + +# The logo of the documentation shown in the header and navigation can either +# be a Material Icon ligature (see https://material.io/icons/) or an image URL +logo: + icon: "\uE80C" + +# Material includes the search in the header as a partial, not as a separate +# template, so it's correct that search.html is missing +include_search_page: false + +# Material doesn't use MkDocs search functionality but provides its own. For +# this reason, only the search index needs to be built +search_index_only: true + +# Static pages to build +static_templates: + - 404.html diff --git a/docs-src/material/partials/footer.html b/docs-src/material/partials/footer.html new file mode 100644 index 000000000..449d9dfcd --- /dev/null +++ b/docs-src/material/partials/footer.html @@ -0,0 +1,58 @@ +{% import "partials/language.html" as lang with context %} + diff --git a/docs-src/material/partials/header.html b/docs-src/material/partials/header.html new file mode 100644 index 000000000..906da3e07 --- /dev/null +++ b/docs-src/material/partials/header.html @@ -0,0 +1,49 @@ +
+ +
diff --git a/docs-src/material/partials/hero.html b/docs-src/material/partials/hero.html new file mode 100644 index 000000000..9f6d77e92 --- /dev/null +++ b/docs-src/material/partials/hero.html @@ -0,0 +1,10 @@ +{% set feature = config.theme.feature %} +{% set class = "md-hero" %} +{% if not feature.tabs %} + {% set class = "md-hero md-hero--expand" %} +{% endif %} +
+
+ {{ page.meta.hero }} +
+
diff --git a/docs-src/material/partials/integrations/analytics.html b/docs-src/material/partials/integrations/analytics.html new file mode 100644 index 000000000..2b0fcdfdc --- /dev/null +++ b/docs-src/material/partials/integrations/analytics.html @@ -0,0 +1 @@ + diff --git a/docs-src/material/partials/integrations/disqus.html b/docs-src/material/partials/integrations/disqus.html new file mode 100644 index 000000000..5f003ca41 --- /dev/null +++ b/docs-src/material/partials/integrations/disqus.html @@ -0,0 +1,14 @@ +
+ diff --git a/docs-src/material/partials/language.html b/docs-src/material/partials/language.html new file mode 100644 index 000000000..278339b74 --- /dev/null +++ b/docs-src/material/partials/language.html @@ -0,0 +1,9 @@ +{% import "partials/language/" + config.theme.language + ".html" as lang %} +{% macro t(key) %}{{ { + "search.language": ( + config.extra.search | default({}) + ).language | default(config.theme.language, true), + "search.tokenizer": ( + config.extra.search | default({}) + ).tokenizer | default("", true), +}[key] or lang.t(key) }}{% endmacro %} diff --git a/docs-src/material/partials/language/da.html b/docs-src/material/partials/language/da.html new file mode 100644 index 000000000..e123b4998 --- /dev/null +++ b/docs-src/material/partials/language/da.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "da", + "clipboard.copy": "Kopiér til udklipsholderen", + "clipboard.copied": "Kopieret til udklipsholderen", + "edit.link.title": "Redigér denne side", + "footer.previous": "Forrige", + "footer.next": "Næste", + "meta.comments": "Kommentarer", + "meta.source": "Kilde", + "search.placeholder": "Søg", + "search.result.placeholder": "Indtask søgeord", + "search.result.none": "Ingen resultater fundet", + "search.result.one": "1 resultat", + "search.result.other": "# resultater", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Åbn arkiv", + "toc.title": "Indholdsfortegnelse" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/de.html b/docs-src/material/partials/language/de.html new file mode 100644 index 000000000..e5bbe53df --- /dev/null +++ b/docs-src/material/partials/language/de.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "de", + "clipboard.copy": "In Zwischenablage kopieren", + "clipboard.copied": "In Zwischenablage kopiert", + "edit.link.title": "Seite editieren", + "footer.previous": "Vorherige Seite", + "footer.next": "Nächste Seite", + "meta.comments": "Kommentare", + "meta.source": "Quellcode", + "search.placeholder": "Suche", + "search.result.placeholder": "Suchbegriff eingeben", + "search.result.none": "Keine Suchergebnisse", + "search.result.one": "1 Suchergebnis", + "search.result.other": "# Suchergebnisse", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Quellcode", + "toc.title": "Inhaltsverzeichnis" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/en.html b/docs-src/material/partials/language/en.html new file mode 100644 index 000000000..2068c97da --- /dev/null +++ b/docs-src/material/partials/language/en.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "en", + "clipboard.copy": "Copy to clipboard", + "clipboard.copied": "Copied to clipboard", + "edit.link.title": "Edit this page", + "footer.previous": "Previous", + "footer.next": "Next", + "meta.comments": "Comments", + "meta.source": "Source", + "search.placeholder": "Search", + "search.result.placeholder": "Type to start searching", + "search.result.none": "No matching documents", + "search.result.one": "1 matching document", + "search.result.other": "# matching documents", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Go to repository", + "toc.title": "Table of contents" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/es.html b/docs-src/material/partials/language/es.html new file mode 100644 index 000000000..1e2dbf68f --- /dev/null +++ b/docs-src/material/partials/language/es.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "es", + "clipboard.copy": "Copiar 
al portapapeles", + "clipboard.copied": "Copiado al portapapeles", + "edit.link.title": "Editar esta página", + "footer.previous": "Anterior", + "footer.next": "Siguiente", + "meta.comments": "Comentarios", + "meta.source": "Fuente", + "search.placeholder": "Búsqueda", + "search.result.placeholder": "Teclee para comenzar búsqueda", + "search.result.none": "No se encontraron documentos", + "search.result.one": "1 documento encontrado", + "search.result.other": "# documentos encontrados", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Ir al repositorio", + "toc.title": "Tabla de contenidos" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/fr.html b/docs-src/material/partials/language/fr.html new file mode 100644 index 000000000..87d7faa99 --- /dev/null +++ b/docs-src/material/partials/language/fr.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "fr", + "clipboard.copy": "Copier dans le presse-papier", + "clipboard.copied": "Copié dans le presse-papier", + "edit.link.title": "Editer cette page", + "footer.previous": "Précédent", + "footer.next": "Suivant", + "meta.comments": "Commentaires", + "meta.source": "Source", + "search.placeholder": "Rechercher", + "search.result.placeholder": "Taper pour démarrer la recherche", + "search.result.none": "Aucun document trouvé", + "search.result.one": "1 document trouvé", + "search.result.other": "# documents trouvés", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Aller au dépôt", + "toc.title": "Table des matières" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/it.html b/docs-src/material/partials/language/it.html new file mode 100644 index 000000000..d9fe6fe74 --- /dev/null +++ b/docs-src/material/partials/language/it.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "it", + "clipboard.copy": "Copia", + "clipboard.copied": "Copiato", + "edit.link.title": "Modifica", + "footer.previous": "Precedente", + "footer.next": "Prossimo", + "meta.comments": "Commenti", + "meta.source": "Sorgente", + "search.placeholder": "Cerca", + "search.result.placeholder": "Scrivi per iniziare a cercare", + "search.result.none": "Nessun documento trovato", + "search.result.one": "1 documento trovato", + "search.result.other": "# documenti trovati", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Apri repository", + "toc.title": "Indice" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/ja.html b/docs-src/material/partials/language/ja.html new file mode 100644 index 000000000..09c3b291f --- /dev/null +++ b/docs-src/material/partials/language/ja.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "ja", + "clipboard.copy": "クリップボードへコピー", + "clipboard.copied": "コピーしました", + "edit.link.title": "編集", + "footer.previous": "前", + "footer.next": "次", + "meta.comments": "コメント", + "meta.source": "ソース", + "search.placeholder": "検索", + "search.result.placeholder": "検索キーワードを入力してください", + "search.result.none": "何も見つかりませんでした", + "search.result.one": "1件見つかりました", + "search.result.other": "#件見つかりました", + "search.tokenizer": "[\s\- 、。,.]+", + "source.link.title": "リポジトリへ", + "toc.title": "目次" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/kr.html b/docs-src/material/partials/language/kr.html new file mode 100644 index 000000000..27163eb0b --- /dev/null +++ b/docs-src/material/partials/language/kr.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "kr", + "clipboard.copy": "클립보드로 복사", + "clipboard.copied": "클립보드에 복사됨", + 
"edit.link.title": "이 페이지를 편집", + "footer.previous": "이전", + "footer.next": "다음", + "meta.comments": "댓글", + "meta.source": "출처", + "search.placeholder": "검색", + "search.result.placeholder": "검색어를 입력하세요", + "search.result.none": "검색어와 일치하는 문서가 없습니다", + "search.result.one": "1개의 일치하는 문서", + "search.result.other": "#개의 일치하는 문서", + "search.tokenizer": "[\s\-]+", + "source.link.title": "저장소로 이동", + "toc.title": "목차" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/nl.html b/docs-src/material/partials/language/nl.html new file mode 100644 index 000000000..a7fe2e18b --- /dev/null +++ b/docs-src/material/partials/language/nl.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "nl", + "clipboard.copy": "Kopiëren naar klembord", + "clipboard.copied": "Gekopieerd naar klembord", + "edit.link.title": "Wijzig deze pagina", + "footer.previous": "Vorige", + "footer.next": "Volgende", + "meta.comments": "Reacties", + "meta.source": "Bron", + "search.placeholder": "Zoeken", + "search.result.placeholder": "Typ om te beginnen met zoeken", + "search.result.none": "Geen overeenkomende documenten", + "search.result.one": "1 overeenkomende document", + "search.result.other": "# overeenkomende documenten", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Ga naar repository", + "toc.title": "Inhoudstafel" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/no.html b/docs-src/material/partials/language/no.html new file mode 100644 index 000000000..63484a972 --- /dev/null +++ b/docs-src/material/partials/language/no.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "no", + "clipboard.copy": "Kopier til utklippstavlen", + "clipboard.copied": "Kopiert til utklippstavlen", + "edit.link.title": "Rediger denne siden", + "footer.previous": "Forrige", + "footer.next": "Neste", + "meta.comments": "Kommentarer", + "meta.source": "Kilde", + "search.placeholder": "Søk", + "search.result.placeholder": "Skriv søkeord", + "search.result.none": "Ingen treff", + "search.result.one": "1 treff", + "search.result.other": "# treff", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Gå til kilde", + "toc.title": "Innholdsfortegnelse" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/pl.html b/docs-src/material/partials/language/pl.html new file mode 100644 index 000000000..54889e5c3 --- /dev/null +++ b/docs-src/material/partials/language/pl.html @@ -0,0 +1 @@ +{% macro t(key) %}{{ { "language": "pl", "clipboard.copy": "Kopiuj do schowka", "clipboard.copied": "Skopiowane", "edit.link.title": "Edytuj tę stronę", "footer.previous": "Poprzednia strona", "footer.next": "Następna strona", "meta.comments": "Komentarze", "meta.source": "Kod źródłowy", "search.placeholder": "Szukaj", "search.result.placeholder": "Zacznij pisać, aby szukać", "search.result.none": "Brak wyników wyszukiwania", "search.result.one": "Wyniki wyszukiwania: 1", "search.result.other": "Wyniki wyszukiwania: #", "search.tokenizer": "[\s\-]+", "source.link.title": "Idź do repozytorium", "toc.title": "Spis treści" }[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/pt.html b/docs-src/material/partials/language/pt.html new file mode 100644 index 000000000..2e43fc9ed --- /dev/null +++ b/docs-src/material/partials/language/pt.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "pt", + "clipboard.copy": "Copiar para área de transferência", + "clipboard.copied": "Copiado para área de transferência", + "edit.link.title": "Editar esta página", + 
"footer.previous": "Anterior", + "footer.next": "Próximo", + "meta.comments": "Comentários", + "meta.source": "Fonte", + "search.placeholder": "Buscar", + "search.result.placeholder": "Digite para iniciar a busca", + "search.result.none": "Nenhum resultado encontrado", + "search.result.one": "1 resultado encontrado", + "search.result.other": "# resultados encontrados", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Ir ao repositório", + "toc.title": "Índice" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/ru.html b/docs-src/material/partials/language/ru.html new file mode 100644 index 000000000..d762438bb --- /dev/null +++ b/docs-src/material/partials/language/ru.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "ru", + "clipboard.copy": "Копировать в буфер", + "clipboard.copied": "Скопировано в буфер", + "edit.link.title": "Редактировать страницу", + "footer.previous": "Назад", + "footer.next": "Вперед", + "meta.comments": "Комментарии", + "meta.source": "Исходный код", + "search.placeholder": "Поиск", + "search.result.placeholder": "Начните печатать для поиска", + "search.result.none": "Совпадений не найдено", + "search.result.one": "Найдено 1 совпадение", + "search.result.other": "Найдено # совпадений", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Перейти к репозиторию", + "toc.title": "Содержание" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/sv.html b/docs-src/material/partials/language/sv.html new file mode 100644 index 000000000..1d164713e --- /dev/null +++ b/docs-src/material/partials/language/sv.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "sv", + "clipboard.copy": "Kopiera till urklipp", + "clipboard.copied": "Kopierat till urklipp", + "edit.link.title": "Redigera sidan", + "footer.previous": "Föregående", + "footer.next": "Nästa", + "meta.comments": "Kommentarer", + "meta.source": "Källa", + "search.placeholder": "Sök", + "search.result.placeholder": "Skriv sökord", + "search.result.none": "Inga sökresultat", + "search.result.one": "1 sökresultat", + "search.result.other": "# sökresultat", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Gå till datakatalog", + "toc.title": "Innehållsförteckning" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/tr.html b/docs-src/material/partials/language/tr.html new file mode 100644 index 000000000..43e477e3f --- /dev/null +++ b/docs-src/material/partials/language/tr.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "tr", + "clipboard.copy": "Kopyala", + "clipboard.copied": "Kopyalandı", + "edit.link.title": "Düzenle", + "footer.previous": "Önceki", + "footer.next": "Sonraki", + "meta.comments": "Yorumlar", + "meta.source": "Kaynak", + "search.placeholder": "Ara", + "search.result.placeholder": "Aramaya başlamak için yazın", + "search.result.none": "Eşleşen doküman bulunamadı", + "search.result.one": "1 doküman bulundu", + "search.result.other": "# doküman bulundu", + "search.tokenizer": "[\s\-]+", + "source.link.title": "Depoya git", + "toc.title": "İçindekiler" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/zh-Hant.html b/docs-src/material/partials/language/zh-Hant.html new file mode 100644 index 000000000..834e7b293 --- /dev/null +++ b/docs-src/material/partials/language/zh-Hant.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "zh-Hant", + "clipboard.copy": "拷貝", + "clipboard.copied": "已拷貝", + "edit.link.title": "編輯此頁", + "footer.previous": "上一頁", + 
"footer.next": "下一頁", + "meta.comments": "評論", + "meta.source": "來源", + "search.placeholder": "搜尋", + "search.result.placeholder": "鍵入以開始檢索", + "search.result.none": "沒有找到符合條件的結果", + "search.result.one": "找到 1 个符合條件的結果", + "search.result.other": "# 個符合條件的結果", + "search.tokenizer": "[\,\。]+", + "source.link.title": "前往 Github 倉庫", + "toc.title": "目錄" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/language/zh.html b/docs-src/material/partials/language/zh.html new file mode 100644 index 000000000..84c77d43c --- /dev/null +++ b/docs-src/material/partials/language/zh.html @@ -0,0 +1,18 @@ +{% macro t(key) %}{{ { + "language": "zh", + "clipboard.copy": "复制", + "clipboard.copied": "已复制", + "edit.link.title": "编辑此页", + "footer.previous": "后退", + "footer.next": "前进", + "meta.comments": "评论", + "meta.source": "来源", + "search.placeholder": "搜索", + "search.result.placeholder": "键入以开始搜索", + "search.result.none": "没有找到符合条件的结果", + "search.result.one": "找到 1 个符合条件的结果", + "search.result.other": "# 个符合条件的结果", + "search.tokenizer": "[\,\。]+", + "source.link.title": "前往 Github 仓库", + "toc.title": "目录" +}[key] }}{% endmacro %} diff --git a/docs-src/material/partials/nav-item.html b/docs-src/material/partials/nav-item.html new file mode 100644 index 000000000..c3e0b51ba --- /dev/null +++ b/docs-src/material/partials/nav-item.html @@ -0,0 +1,54 @@ +{% set class = "md-nav__item" %} +{% if nav_item.active %} + {% set class = "md-nav__item md-nav__item--active" %} +{% endif %} +{% if nav_item.children %} +
  • + {% if nav_item.active %} + + {% else %} + + {% endif %} + + +
  • +{% elif nav_item == page %} +
  • + {% set toc_ = page.toc %} + + {% if toc_ | first is defined and "\x3ch1 id=" in page.content %} + {% set toc_ = (toc_ | first).children %} + {% endif %} + {% if toc_ | first is defined %} + + {% endif %} + + {{ nav_item.title }} + + {% if toc_ | first is defined %} + {% include "partials/toc.html" %} + {% endif %} +
  • +{% else %} +
  • + + {{ nav_item.title }} + +
  • +{% endif %} diff --git a/docs-src/material/partials/nav.html b/docs-src/material/partials/nav.html new file mode 100644 index 000000000..64c5c9e00 --- /dev/null +++ b/docs-src/material/partials/nav.html @@ -0,0 +1,24 @@ + diff --git a/docs-src/material/partials/search.html b/docs-src/material/partials/search.html new file mode 100644 index 000000000..49e564a48 --- /dev/null +++ b/docs-src/material/partials/search.html @@ -0,0 +1,21 @@ +{% import "partials/language.html" as lang with context %} + diff --git a/docs-src/material/partials/social.html b/docs-src/material/partials/social.html new file mode 100644 index 000000000..cb68735ad --- /dev/null +++ b/docs-src/material/partials/social.html @@ -0,0 +1,9 @@ +{% if config.extra.social %} + +{% endif %} diff --git a/docs-src/material/partials/source.html b/docs-src/material/partials/source.html new file mode 100644 index 000000000..48d4eb1aa --- /dev/null +++ b/docs-src/material/partials/source.html @@ -0,0 +1,25 @@ +{% import "partials/language.html" as lang with context %} +{% set platform = config.extra.repo_icon or config.repo_url %} +{% if "github" in platform %} + {% set repo_type = "github" %} +{% elif "gitlab" in platform %} + {% set repo_type = "gitlab" %} +{% elif "bitbucket" in platform %} + {% set repo_type = "bitbucket" %} +{% else %} + {% set repo_type = "" %} +{% endif %} +{% block repo %} + + {% if repo_type %} +
    + + + +
    + {% endif %} +
    + {{ config.repo_name }} +
    +
    +{% endblock %} diff --git a/docs-src/material/partials/tabs-item.html b/docs-src/material/partials/tabs-item.html new file mode 100644 index 000000000..686b5a59b --- /dev/null +++ b/docs-src/material/partials/tabs-item.html @@ -0,0 +1,31 @@ +{% if nav_item.is_homepage %} +
  • + {% if not page.ancestors | length and nav | selectattr("url", page.url) %} + + {{ nav_item.title }} + + {% else %} + + {{ nav_item.title }} + + {% endif %} +
  • +{% elif nav_item.children and nav_item.children | length > 0 %} + {% set title = title | default(nav_item.title) %} + {% if (nav_item.children | first).children | length > 0 %} + {% set nav_item = nav_item.children | first %} + {% include "partials/tabs-item.html" %} + {% else %} +
  • + {% if nav_item.active %} + + {{ title }} + + {% else %} + + {{ title }} + + {% endif %} +
  • + {% endif %} +{% endif %} diff --git a/docs-src/material/partials/tabs.html b/docs-src/material/partials/tabs.html new file mode 100644 index 000000000..e040436bf --- /dev/null +++ b/docs-src/material/partials/tabs.html @@ -0,0 +1,13 @@ +{% set class = "md-tabs" %} +{% if page.ancestors | length > 0 %} + {% set class = "md-tabs md-tabs--active" %} +{% endif %} + diff --git a/docs-src/material/partials/toc-item.html b/docs-src/material/partials/toc-item.html new file mode 100644 index 000000000..3b4f4d76c --- /dev/null +++ b/docs-src/material/partials/toc-item.html @@ -0,0 +1,14 @@ +
  • + + {{ toc_item.title }} + + {% if toc_item.children %} + + {% endif %} +
  • diff --git a/docs-src/material/partials/toc.html b/docs-src/material/partials/toc.html new file mode 100644 index 000000000..f268ac0c6 --- /dev/null +++ b/docs-src/material/partials/toc.html @@ -0,0 +1,29 @@ +{% import "partials/language.html" as lang with context %} + diff --git a/docs-src/mkdocs.yml b/docs-src/mkdocs.yml index c27bed00d..0ed3b3834 100644 --- a/docs-src/mkdocs.yml +++ b/docs-src/mkdocs.yml @@ -8,6 +8,7 @@ extra_css: ['stylesheets/extra.css'] # Repository repo_name: GitHub repo_url: https://github.com/quintilesims/layer0/ +edit_uri: blob/master/docs-src/docs/ pages: - Home: index.md @@ -21,8 +22,6 @@ pages: - "Walkthrough: Introduction": guides/walkthrough/intro.md - "Walkthrough: Deployment 1": guides/walkthrough/deployment-1.md - "Walkthrough: Deployment 2": guides/walkthrough/deployment-2.md - - "Walkthrough: Deployment 3": guides/walkthrough/deployment-3.md - - Terraform beyond Layer0: guides/terraform_beyond_layer0.md - One-off Task: guides/one_off_task.md - Reference: - Layer0 CLI: reference/cli.md @@ -39,19 +38,20 @@ pages: - Secure Shell (SSH): troubleshooting/ssh.md site_dir: ../docs -theme_dir: material -extra: - version: v0.10.3 - logo: 'static/logo_icon.png' - author: xfra + +theme: + name: 'material' + custom_dir: 'material' palette: primary: 'red' accent: 'grey' font: text: 'Roboto' code: 'Roboto Mono' + logo: 'static/logo_icon.png' + markdown_extensions: - - codehilite(css_class=code) + - codehilite - admonition - toc: permalink: '#' diff --git a/docs/404.html b/docs/404.html new file mode 100644 index 000000000..34939a634 --- /dev/null +++ b/docs/404.html @@ -0,0 +1,612 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Layer0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    + +
    + + + + +
    +
    + + +
    +
    +
    + +
    +
    +
    + + + +
    +
    + +

    404 - Not found

    + + + + +
    +
    +
    +
    + + + + +
    + + + + + + + + + + + \ No newline at end of file diff --git a/docs/CNAME b/docs/CNAME index c5602626b..624126749 100644 --- a/docs/CNAME +++ b/docs/CNAME @@ -1 +1 @@ -layer0.ims.io \ No newline at end of file +layer0.ims.io diff --git a/docs/assets/fonts/icon.eot b/docs/assets/fonts/icon.eot old mode 100755 new mode 100644 diff --git a/docs/assets/fonts/icon.svg b/docs/assets/fonts/icon.svg old mode 100755 new mode 100644 diff --git a/docs/assets/fonts/icon.ttf b/docs/assets/fonts/icon.ttf old mode 100755 new mode 100644 diff --git a/docs/assets/fonts/icon.woff b/docs/assets/fonts/icon.woff old mode 100755 new mode 100644 diff --git a/docs/assets/images/favicon.png b/docs/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..76d17f57ad903c3ea2f1b564cafb95bf9af84ee3 GIT binary patch literal 521 zcmV+k0`~ohP)kdg0005dNkl2WptjAn6@db&Pvy?U$ zv>P|<&rCZfZF0jmq0opf8)91(A<*iIVPPJJT((+JiF~>9KAA3%heFdnI;SaK+~|aU zQ~!x`%y{jX1<~SK2RxN7Db8`yWBbf6p7&07{VXfaam*cUs&eu*Zu(xaIL8rP){;a< zS~$}^Td32Rw+W1TqTd|L{#~jJet4!qwKsb5hq%YXiiUV!yH=ltu0>s|FLsT+Iy7K~ z!6*Z0a@vQ;AiZo!=s{{fqR+ct6YQPzbk+j}*qe7vtu39I7 zrOtZqU}=NnLchJxsU9iY+}3TYDl|BvPsX%E@dlyLgdV%q$UP|Y?DfcGb`}K&$;drd z+hL;zy7UTccUYU+h`ONIU|d=%`(0$=KW4%tVWXj~AE \ No newline at end of file diff --git a/docs/assets/images/icons/github.a4034fb1.svg b/docs/assets/images/icons/github.a4034fb1.svg new file mode 100644 index 000000000..3cacb2e0f --- /dev/null +++ b/docs/assets/images/icons/github.a4034fb1.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/images/icons/gitlab.d80e5efc.svg b/docs/assets/images/icons/gitlab.d80e5efc.svg new file mode 100644 index 000000000..b036a9b52 --- /dev/null +++ b/docs/assets/images/icons/gitlab.d80e5efc.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/javascripts/application.cae2244d.js b/docs/assets/javascripts/application.cae2244d.js new file mode 100644 index 000000000..d36d0b687 --- /dev/null +++ b/docs/assets/javascripts/application.cae2244d.js @@ -0,0 +1 @@ +!function(e,t){for(var n in t)e[n]=t[n]}(window,function(e){function t(r){if(n[r])return n[r].exports;var i=n[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,t),i.l=!0,i.exports}var n={};return t.m=e,t.c=n,t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="",t(t.s=6)}([function(e,t,n){"use strict";t.__esModule=!0,t.default={createElement:function(e,t){var n=document.createElement(e);t&&Array.prototype.forEach.call(Object.keys(t),function(e){n.setAttribute(e,t[e])});for(var r=arguments.length,i=Array(r>2?r-2:0),o=2;o pre, pre > code");Array.prototype.forEach.call(n,function(t,n){var r="__code_"+n,i=e.createElement("button",{class:"md-clipboard",title:h("clipboard.copy"),"data-clipboard-target":"#"+r+" pre, #"+r+" code"},e.createElement("span",{class:"md-clipboard__message"})),o=t.parentNode;o.id=r,o.insertBefore(i,t)});new c.default(".md-clipboard").on("success",function(e){var t=e.trigger.querySelector(".md-clipboard__message");if(!(t instanceof HTMLElement))throw new 
ReferenceError;e.clearSelection(),t.dataset.mdTimer&&clearTimeout(parseInt(t.dataset.mdTimer,10)),t.classList.add("md-clipboard__message--active"),t.innerHTML=h("clipboard.copied"),t.dataset.mdTimer=setTimeout(function(){t.classList.remove("md-clipboard__message--active"),t.dataset.mdTimer=""},2e3).toString()})}if(!Modernizr.details){var r=document.querySelectorAll("details > summary");Array.prototype.forEach.call(r,function(e){e.addEventListener("click",function(e){var t=e.target.parentNode;t.hasAttribute("open")?t.removeAttribute("open"):t.setAttribute("open","")})})}var i=function(){if(document.location.hash){var e=document.getElementById(document.location.hash.substring(1));if(!e)return;for(var t=e.parentNode;t&&!(t instanceof HTMLDetailsElement);)t=t.parentNode;if(t&&!t.open){t.open=!0;var n=location.hash;location.hash=" ",location.hash=n}}};if(window.addEventListener("hashchange",i),i(),Modernizr.ios){var o=document.querySelectorAll("[data-md-scrollfix]");Array.prototype.forEach.call(o,function(e){e.addEventListener("touchstart",function(){var t=e.scrollTop;0===t?e.scrollTop=1:t+e.offsetHeight===e.scrollHeight&&(e.scrollTop=t-1)})})}}).listen(),new f.default.Event.Listener(window,["scroll","resize","orientationchange"],new f.default.Header.Shadow("[data-md-component=container]","[data-md-component=header]")).listen(),new f.default.Event.Listener(window,["scroll","resize","orientationchange"],new f.default.Header.Title("[data-md-component=title]",".md-typeset h1")).listen(),document.querySelector("[data-md-component=hero]")&&new f.default.Event.Listener(window,["scroll","resize","orientationchange"],new f.default.Tabs.Toggle("[data-md-component=hero]")).listen(),document.querySelector("[data-md-component=tabs]")&&new f.default.Event.Listener(window,["scroll","resize","orientationchange"],new f.default.Tabs.Toggle("[data-md-component=tabs]")).listen(),new f.default.Event.MatchMedia("(min-width: 1220px)",new f.default.Event.Listener(window,["scroll","resize","orientationchange"],new f.default.Sidebar.Position("[data-md-component=navigation]","[data-md-component=header]"))),document.querySelector("[data-md-component=toc]")&&new f.default.Event.MatchMedia("(min-width: 960px)",new f.default.Event.Listener(window,["scroll","resize","orientationchange"],new f.default.Sidebar.Position("[data-md-component=toc]","[data-md-component=header]"))),new f.default.Event.MatchMedia("(min-width: 960px)",new f.default.Event.Listener(window,"scroll",new f.default.Nav.Blur("[data-md-component=toc] [href]")));var n=document.querySelectorAll("[data-md-component=collapsible]");Array.prototype.forEach.call(n,function(e){new f.default.Event.MatchMedia("(min-width: 1220px)",new f.default.Event.Listener(e.previousElementSibling,"click",new f.default.Nav.Collapse(e)))}),new f.default.Event.MatchMedia("(max-width: 1219px)",new f.default.Event.Listener("[data-md-component=navigation] [data-md-toggle]","change",new f.default.Nav.Scrolling("[data-md-component=navigation] nav"))),new f.default.Event.MatchMedia("(max-width: 959px)",new f.default.Event.Listener("[data-md-toggle=search]","change",new f.default.Search.Lock("[data-md-toggle=search]"))),new f.default.Event.Listener("[data-md-component=query]",["focus","keyup","change"],new f.default.Search.Result("[data-md-component=result]",function(){return fetch(t.url.base+"/"+(t.version<"0.17"?"mkdocs":"search")+"/search_index.json",{credentials:"same-origin"}).then(function(e){return e.json()}).then(function(e){return e.docs.map(function(e){return 
e.location=t.url.base+e.location,e})})})).listen(),new f.default.Event.MatchMedia("(max-width: 959px)",new f.default.Event.Listener("[data-md-component=navigation] [href^='#']","click",function(){var e=document.querySelector("[data-md-toggle=drawer]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.checked&&(e.checked=!1,e.dispatchEvent(new CustomEvent("change")))})),new f.default.Event.Listener("[data-md-component=reset]","click",function(){setTimeout(function(){var e=document.querySelector("[data-md-component=query]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.focus()},10)}).listen(),new f.default.Event.Listener("[data-md-toggle=search]","change",function(e){setTimeout(function(e){if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=document.querySelector("[data-md-component=query]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t.focus()}},400,e.target)}).listen(),new f.default.Event.MatchMedia("(min-width: 960px)",new f.default.Event.Listener("[data-md-component=query]","focus",function(){var e=document.querySelector("[data-md-toggle=search]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.checked||(e.checked=!0,e.dispatchEvent(new CustomEvent("change")))})),new f.default.Event.Listener(window,"keydown",function(e){var t=document.querySelector("[data-md-toggle=search]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;var n=document.querySelector("[data-md-component=query]");if(!(n instanceof HTMLInputElement))throw new ReferenceError;if(!e.metaKey&&!e.ctrlKey)if(t.checked){if(13===e.keyCode){if(n===document.activeElement){e.preventDefault();var r=document.querySelector("[data-md-component=search] [href][data-md-state=active]");r instanceof HTMLLinkElement&&(window.location=r.getAttribute("href"),t.checked=!1,t.dispatchEvent(new CustomEvent("change")),n.blur())}}else if(27===e.keyCode)t.checked=!1,t.dispatchEvent(new CustomEvent("change")),n.blur();else if(-1!==[8,37,39].indexOf(e.keyCode))n!==document.activeElement&&n.focus();else if(-1!==[9,38,40].indexOf(e.keyCode)){var i=e.shiftKey?38:40,o=9===e.keyCode?i:e.keyCode,a=Array.prototype.slice.call(document.querySelectorAll("[data-md-component=query], [data-md-component=search] [href]")),s=a.find(function(e){if(!(e instanceof HTMLElement))throw new ReferenceError;return"active"===e.dataset.mdState});s&&(s.dataset.mdState="");var c=Math.max(0,(a.indexOf(s)+a.length+(38===o?-1:1))%a.length);return a[c]&&(a[c].dataset.mdState="active",a[c].focus()),e.preventDefault(),e.stopPropagation(),!1}}else document.activeElement&&!document.activeElement.form&&(70!==e.keyCode&&83!==e.keyCode||(n.focus(),e.preventDefault()))}).listen(),new f.default.Event.Listener(window,"keypress",function(){var e=document.querySelector("[data-md-toggle=search]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=document.querySelector("[data-md-component=query]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t!==document.activeElement&&t.focus()}}).listen(),function(){var e=document.querySelector("[data-md-source]");if(!e)return a.default.resolve([]);if(!(e instanceof HTMLAnchorElement))throw new ReferenceError;switch(e.dataset.mdSource){case"github":return new f.default.Source.Adapter.GitHub(e).fetch();default:return a.default.resolve([])}}().then(function(e){var t=document.querySelectorAll("[data-md-source]");Array.prototype.forEach.call(t,function(t){new 
f.default.Source.Repository(t).initialize(e)})})}t.__esModule=!0,t.app=void 0,n(7),n(8),n(9),n(10),n(11),n(12),n(13);var o=n(14),a=r(o),s=n(18),c=r(s),u=n(26),l=r(u),d=n(27),f=r(d);window.Promise=window.Promise||a.default;var h=function(e){var t=document.getElementsByName("lang:"+e)[0];if(!(t instanceof HTMLMetaElement))throw new ReferenceError;return t.content},p={initialize:i};t.app=p}).call(t,n(0))},function(e,t,n){e.exports=n.p+"assets/images/icons/bitbucket.4ebea66e.svg"},function(e,t,n){e.exports=n.p+"assets/images/icons/github.a4034fb1.svg"},function(e,t,n){e.exports=n.p+"assets/images/icons/gitlab.d80e5efc.svg"},function(e,t){},function(e,t){},function(e,t){try{var n=new window.CustomEvent("test");if(n.preventDefault(),!0!==n.defaultPrevented)throw new Error("Could not prevent default")}catch(e){var r=function(e,t){var n,r;return t=t||{bubbles:!1,cancelable:!1,detail:void 0},n=document.createEvent("CustomEvent"),n.initCustomEvent(e,t.bubbles,t.cancelable,t.detail),r=n.preventDefault,n.preventDefault=function(){r.call(this);try{Object.defineProperty(this,"defaultPrevented",{get:function(){return!0}})}catch(e){this.defaultPrevented=!0}},n};r.prototype=window.Event.prototype,window.CustomEvent=r}},function(e,t,n){window.fetch||(window.fetch=n(1).default||n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e){function n(){}function r(e,t){return function(){e.apply(t,arguments)}}function i(e,t){for(;3===e._state;)e=e._value;if(0===e._state)return void e._deferreds.push(t);e._handled=!0,l._immediateFn(function(){var n=1===e._state?t.onFulfilled:t.onRejected;if(null===n)return void(1===e._state?o:a)(t.promise,e._value);var r;try{r=n(e._value)}catch(e){return void a(t.promise,e)}o(t.promise,r)})}function o(e,t){try{if(t===e)throw new TypeError("A promise cannot be resolved with itself.");if(t&&("object"==typeof t||"function"==typeof t)){var n=t.then;if(t instanceof l)return e._state=3,e._value=t,void s(e);if("function"==typeof n)return void u(r(n,t),e)}e._state=1,e._value=t,s(e)}catch(t){a(e,t)}}function a(e,t){e._state=2,e._value=t,s(e)}function s(e){2===e._state&&0===e._deferreds.length&&l._immediateFn(function(){e._handled||l._unhandledRejectionFn(e._value)});for(var t=0,n=e._deferreds.length;t=0&&(e._idleTimeoutId=setTimeout(function(){e._onTimeout&&e._onTimeout()},t))},n(16),t.setImmediate=setImmediate,t.clearImmediate=clearImmediate},function(e,t,n){(function(e,t){!function(e,n){"use strict";function r(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n0&&void 0!==arguments[0]?arguments[0]:{};this.action="function"==typeof e.action?e.action:this.defaultAction,this.target="function"==typeof e.target?e.target:this.defaultTarget,this.text="function"==typeof e.text?e.text:this.defaultText,this.container="object"===f(e.container)?e.container:document.body}},{key:"listenClick",value:function(e){var t=this;this.listener=(0,d.default)(e,"click",function(e){return t.onClick(e)})}},{key:"onClick",value:function(e){var t=e.delegateTarget||e.currentTarget;this.clipboardAction&&(this.clipboardAction=null),this.clipboardAction=new u.default({action:this.action(t),target:this.target(t),text:this.text(t),container:this.container,trigger:t,emitter:this})}},{key:"defaultAction",value:function(e){return c("action",e)}},{key:"defaultTarget",value:function(e){var t=c("target",e);if(t)return document.querySelector(t)}},{key:"defaultText",value:function(e){return 
c("text",e)}},{key:"destroy",value:function(){this.listener.destroy(),this.clipboardAction&&(this.clipboardAction.destroy(),this.clipboardAction=null)}}],[{key:"isSupported",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:["copy","cut"],t="string"==typeof e?[e]:e,n=!!document.queryCommandSupported;return t.forEach(function(e){n=n&&!!document.queryCommandSupported(e)}),n}}]),t}(l.default);e.exports=p})},function(e,t,n){var r,i,o;!function(a,s){i=[e,n(20)],r=s,void 0!==(o="function"==typeof r?r.apply(t,i):r)&&(e.exports=o)}(0,function(e,t){"use strict";function n(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}var r=function(e){return e&&e.__esModule?e:{default:e}}(t),i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=function(){function e(e,t){for(var n=0;n0&&void 0!==arguments[0]?arguments[0]:{};this.action=e.action,this.container=e.container,this.emitter=e.emitter,this.target=e.target,this.text=e.text,this.trigger=e.trigger,this.selectedText=""}},{key:"initSelection",value:function(){this.text?this.selectFake():this.target&&this.selectTarget()}},{key:"selectFake",value:function(){var e=this,t="rtl"==document.documentElement.getAttribute("dir");this.removeFake(),this.fakeHandlerCallback=function(){return e.removeFake()},this.fakeHandler=this.container.addEventListener("click",this.fakeHandlerCallback)||!0,this.fakeElem=document.createElement("textarea"),this.fakeElem.style.fontSize="12pt",this.fakeElem.style.border="0",this.fakeElem.style.padding="0",this.fakeElem.style.margin="0",this.fakeElem.style.position="absolute",this.fakeElem.style[t?"right":"left"]="-9999px";var n=window.pageYOffset||document.documentElement.scrollTop;this.fakeElem.style.top=n+"px",this.fakeElem.setAttribute("readonly",""),this.fakeElem.value=this.text,this.container.appendChild(this.fakeElem),this.selectedText=(0,r.default)(this.fakeElem),this.copyText()}},{key:"removeFake",value:function(){this.fakeHandler&&(this.container.removeEventListener("click",this.fakeHandlerCallback),this.fakeHandler=null,this.fakeHandlerCallback=null),this.fakeElem&&(this.container.removeChild(this.fakeElem),this.fakeElem=null)}},{key:"selectTarget",value:function(){this.selectedText=(0,r.default)(this.target),this.copyText()}},{key:"copyText",value:function(){var e=void 0;try{e=document.execCommand(this.action)}catch(t){e=!1}this.handleResult(e)}},{key:"handleResult",value:function(e){this.emitter.emit(e?"success":"error",{action:this.action,text:this.selectedText,trigger:this.trigger,clearSelection:this.clearSelection.bind(this)})}},{key:"clearSelection",value:function(){this.trigger&&this.trigger.focus(),window.getSelection().removeAllRanges()}},{key:"destroy",value:function(){this.removeFake()}},{key:"action",set:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"copy";if(this._action=e,"copy"!==this._action&&"cut"!==this._action)throw new Error('Invalid "action" value, use either "copy" or "cut"')},get:function(){return this._action}},{key:"target",set:function(e){if(void 0!==e){if(!e||"object"!==(void 0===e?"undefined":i(e))||1!==e.nodeType)throw new Error('Invalid "target" value, use a valid Element');if("copy"===this.action&&e.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if("cut"===this.action&&(e.hasAttribute("readonly")||e.hasAttribute("disabled")))throw new Error('Invalid "target" attribute. You can\'t cut text from elements with "readonly" or "disabled" attributes');this._target=e}},get:function(){return this._target}}]),e}();e.exports=a})},function(e,t){function n(e){var t;if("SELECT"===e.nodeName)e.focus(),t=e.value;else if("INPUT"===e.nodeName||"TEXTAREA"===e.nodeName){var n=e.hasAttribute("readonly");n||e.setAttribute("readonly",""),e.select(),e.setSelectionRange(0,e.value.length),n||e.removeAttribute("readonly"),t=e.value}else{e.hasAttribute("contenteditable")&&e.focus();var r=window.getSelection(),i=document.createRange();i.selectNodeContents(e),r.removeAllRanges(),r.addRange(i),t=r.toString()}return t}e.exports=n},function(e,t){function n(){}n.prototype={on:function(e,t,n){var r=this.e||(this.e={});return(r[e]||(r[e]=[])).push({fn:t,ctx:n}),this},once:function(e,t,n){function r(){i.off(e,r),t.apply(n,arguments)}var i=this;return r._=t,this.on(e,r,n)},emit:function(e){var t=[].slice.call(arguments,1),n=((this.e||(this.e={}))[e]||[]).slice(),r=0,i=n.length;for(r;r=0,a=navigator.userAgent.indexOf("Android")>0&&!o,s=/iP(ad|hone|od)/.test(navigator.userAgent)&&!o,c=s&&/OS 4_\d(_\d)?/.test(navigator.userAgent),u=s&&/OS [6-7]_\d/.test(navigator.userAgent),l=navigator.userAgent.indexOf("BB10")>0;i.prototype.needsClick=function(e){switch(e.nodeName.toLowerCase()){case"button":case"select":case"textarea":if(e.disabled)return!0;break;case"input":if(s&&"file"===e.type||e.disabled)return!0;break;case"label":case"iframe":case"video":return!0}return/\bneedsclick\b/.test(e.className)},i.prototype.needsFocus=function(e){switch(e.nodeName.toLowerCase()){case"textarea":return!0;case"select":return!a;case"input":switch(e.type){case"button":case"checkbox":case"file":case"image":case"radio":case"submit":return!1}return!e.disabled&&!e.readOnly;default:return/\bneedsfocus\b/.test(e.className)}},i.prototype.sendClick=function(e,t){var n,r;document.activeElement&&document.activeElement!==e&&document.activeElement.blur(),r=t.changedTouches[0],n=document.createEvent("MouseEvents"),n.initMouseEvent(this.determineEventType(e),!0,!0,window,1,r.screenX,r.screenY,r.clientX,r.clientY,!1,!1,!1,!1,0,null),n.forwardedTouchEvent=!0,e.dispatchEvent(n)},i.prototype.determineEventType=function(e){return a&&"select"===e.tagName.toLowerCase()?"mousedown":"click"},i.prototype.focus=function(e){var t;s&&e.setSelectionRange&&0!==e.type.indexOf("date")&&"time"!==e.type&&"month"!==e.type?(t=e.value.length,e.setSelectionRange(t,t)):e.focus()},i.prototype.updateScrollParent=function(e){var t,n;if(!(t=e.fastClickScrollParent)||!t.contains(e)){n=e;do{if(n.scrollHeight>n.offsetHeight){t=n,e.fastClickScrollParent=n;break}n=n.parentElement}while(n)}t&&(t.fastClickLastScrollTop=t.scrollTop)},i.prototype.getTargetElementFromEventTarget=function(e){return e.nodeType===Node.TEXT_NODE?e.parentNode:e},i.prototype.onTouchStart=function(e){var t,n,r;if(e.targetTouches.length>1)return!0;if(t=this.getTargetElementFromEventTarget(e.target),n=e.targetTouches[0],s){if(r=window.getSelection(),r.rangeCount&&!r.isCollapsed)return!0;if(!c){if(n.identifier&&n.identifier===this.lastTouchIdentifier)return e.preventDefault(),!1;this.lastTouchIdentifier=n.identifier,this.updateScrollParent(t)}}return 
this.trackingClick=!0,this.trackingClickStart=e.timeStamp,this.targetElement=t,this.touchStartX=n.pageX,this.touchStartY=n.pageY,e.timeStamp-this.lastClickTimen||Math.abs(t.pageY-this.touchStartY)>n},i.prototype.onTouchMove=function(e){return!this.trackingClick||((this.targetElement!==this.getTargetElementFromEventTarget(e.target)||this.touchHasMoved(e))&&(this.trackingClick=!1,this.targetElement=null),!0)},i.prototype.findControl=function(e){return void 0!==e.control?e.control:e.htmlFor?document.getElementById(e.htmlFor):e.querySelector("button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea")},i.prototype.onTouchEnd=function(e){var t,n,r,i,o,l=this.targetElement;if(!this.trackingClick)return!0;if(e.timeStamp-this.lastClickTimethis.tapTimeout)return!0;if(this.cancelNextClick=!1,this.lastClickTime=e.timeStamp,n=this.trackingClickStart,this.trackingClick=!1,this.trackingClickStart=0,u&&(o=e.changedTouches[0],l=document.elementFromPoint(o.pageX-window.pageXOffset,o.pageY-window.pageYOffset)||l,l.fastClickScrollParent=this.targetElement.fastClickScrollParent),"label"===(r=l.tagName.toLowerCase())){if(t=this.findControl(l)){if(this.focus(l),a)return!1;l=t}}else if(this.needsFocus(l))return e.timeStamp-n>100||s&&window.top!==window&&"input"===r?(this.targetElement=null,!1):(this.focus(l),this.sendClick(l,e),s&&"select"===r||(this.targetElement=null,e.preventDefault()),!1);return!(!s||c||!(i=l.fastClickScrollParent)||i.fastClickLastScrollTop===i.scrollTop)||(this.needsClick(l)||(e.preventDefault(),this.sendClick(l,e)),!1)},i.prototype.onTouchCancel=function(){this.trackingClick=!1,this.targetElement=null},i.prototype.onMouse=function(e){return!this.targetElement||(!!e.forwardedTouchEvent||(!e.cancelable||(!(!this.needsClick(this.targetElement)||this.cancelNextClick)||(e.stopImmediatePropagation?e.stopImmediatePropagation():e.propagationStopped=!0,e.stopPropagation(),e.preventDefault(),!1))))},i.prototype.onClick=function(e){var t;return this.trackingClick?(this.targetElement=null,this.trackingClick=!1,!0):"submit"===e.target.type&&0===e.detail||(t=this.onMouse(e),t||(this.targetElement=null),t)},i.prototype.destroy=function(){var e=this.layer;a&&(e.removeEventListener("mouseover",this.onMouse,!0),e.removeEventListener("mousedown",this.onMouse,!0),e.removeEventListener("mouseup",this.onMouse,!0)),e.removeEventListener("click",this.onClick,!0),e.removeEventListener("touchstart",this.onTouchStart,!1),e.removeEventListener("touchmove",this.onTouchMove,!1),e.removeEventListener("touchend",this.onTouchEnd,!1),e.removeEventListener("touchcancel",this.onTouchCancel,!1)},i.notNeeded=function(e){var t,n,r;if(void 
0===window.ontouchstart)return!0;if(n=+(/Chrome\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1]){if(!a)return!0;if(t=document.querySelector("meta[name=viewport]")){if(-1!==t.content.indexOf("user-scalable=no"))return!0;if(n>31&&document.documentElement.scrollWidth<=window.outerWidth)return!0}}if(l&&(r=navigator.userAgent.match(/Version\/([0-9]*)\.([0-9]*)/),r[1]>=10&&r[2]>=3&&(t=document.querySelector("meta[name=viewport]")))){if(-1!==t.content.indexOf("user-scalable=no"))return!0;if(document.documentElement.scrollWidth<=window.outerWidth)return!0}return"none"===e.style.msTouchAction||"manipulation"===e.style.touchAction||(!!(+(/Firefox\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1]>=27&&(t=document.querySelector("meta[name=viewport]"))&&(-1!==t.content.indexOf("user-scalable=no")||document.documentElement.scrollWidth<=window.outerWidth))||("none"===e.style.touchAction||"manipulation"===e.style.touchAction))},i.attach=function(e,t){return new i(e,t)},void 0!==(r=function(){return i}.call(t,n,t,e))&&(e.exports=r)}()},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(28),o=r(i),a=n(30),s=r(a),c=n(33),u=r(c),l=n(37),d=r(l),f=n(43),h=r(f),p=n(45),m=r(p),v=n(51),y=r(v);t.default={Event:o.default,Header:s.default,Nav:u.default,Search:d.default,Sidebar:h.default,Source:m.default,Tabs:y.default}},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(3),o=r(i),a=n(29),s=r(a);t.default={Listener:o.default,MatchMedia:s.default}},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=n(3),o=(function(e){e&&e.__esModule}(i),function e(t,n){r(this,e),this.handler_=function(e){e.matches?n.listen():n.unlisten()};var i=window.matchMedia(t);i.addListener(this.handler_),this.handler_(i)});t.default=o},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(31),o=r(i),a=n(32),s=r(a);t.default={Shadow:o.default,Title:s.default}},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t,n){r(this,e);var i="string"==typeof t?document.querySelector(t):t;if(!(i instanceof HTMLElement&&i.parentNode instanceof HTMLElement))throw new ReferenceError;if(this.el_=i.parentNode,!((i="string"==typeof n?document.querySelector(n):n)instanceof HTMLElement))throw new ReferenceError;this.header_=i,this.height_=0,this.active_=!1}return e.prototype.setup=function(){for(var e=this.el_;e=e.previousElementSibling;){if(!(e instanceof HTMLElement))throw new ReferenceError;this.height_+=e.offsetHeight}this.update()},e.prototype.update=function(e){if(!e||"resize"!==e.type&&"orientationchange"!==e.type){var t=window.pageYOffset>=this.height_;t!==this.active_&&(this.header_.dataset.mdState=(this.active_=t)?"shadow":"")}else this.height_=0,this.setup()},e.prototype.reset=function(){this.header_.dataset.mdState="",this.height_=0,this.active_=!1},e}();t.default=i},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t,n){r(this,e);var i="string"==typeof t?document.querySelector(t):t;if(!(i instanceof HTMLElement))throw new ReferenceError;if(this.el_=i,!((i="string"==typeof n?document.querySelector(n):n)instanceof HTMLHeadingElement))throw new 
ReferenceError;this.header_=i,this.active_=!1}return e.prototype.setup=function(){var e=this;Array.prototype.forEach.call(this.el_.children,function(t){t.style.width=e.el_.offsetWidth-20+"px"})},e.prototype.update=function(e){var t=this,n=window.pageYOffset>=this.header_.offsetTop;n!==this.active_&&(this.el_.dataset.mdState=(this.active_=n)?"active":""),"resize"!==e.type&&"orientationchange"!==e.type||Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},e.prototype.reset=function(){this.el_.dataset.mdState="",this.el_.style.width="",this.active_=!1},e}();t.default=i},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(34),o=r(i),a=n(35),s=r(a),c=n(36),u=r(c);t.default={Blur:o.default,Collapse:s.default,Scrolling:u.default}},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t){r(this,e),this.els_="string"==typeof t?document.querySelectorAll(t):t,this.index_=0,this.offset_=window.pageYOffset,this.dir_=!1,this.anchors_=[].reduce.call(this.els_,function(e,t){return e.concat(document.getElementById(t.hash.substring(1))||[])},[])}return e.prototype.setup=function(){this.update()},e.prototype.update=function(){var e=window.pageYOffset,t=this.offset_-e<0;if(this.dir_!==t&&(this.index_=this.index_=t?0:this.els_.length-1),0!==this.anchors_.length){if(this.offset_<=e)for(var n=this.index_+1;n0&&(this.els_[n-1].dataset.mdState="blur"),this.index_=n;else for(var r=this.index_;r>=0;r--){if(!(this.anchors_[r].offsetTop-80>e)){this.index_=r;break}r>0&&(this.els_[r-1].dataset.mdState="")}this.offset_=e,this.dir_=t}},e.prototype.reset=function(){Array.prototype.forEach.call(this.els_,function(e){e.dataset.mdState=""}),this.index_=0,this.offset_=window.pageYOffset},e}();t.default=i},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t){r(this,e);var n="string"==typeof t?document.querySelector(t):t;if(!(n instanceof HTMLElement))throw new ReferenceError;this.el_=n}return e.prototype.update=function(){var e=this,t=this.el_.getBoundingClientRect().height;if(t)this.el_.style.maxHeight=t+"px",requestAnimationFrame(function(){e.el_.setAttribute("data-md-state","animate"),e.el_.style.maxHeight="0px"});else{this.el_.setAttribute("data-md-state","expand"),this.el_.style.maxHeight="";var n=this.el_.getBoundingClientRect().height;this.el_.removeAttribute("data-md-state"),this.el_.style.maxHeight="0px",requestAnimationFrame(function(){e.el_.setAttribute("data-md-state","animate"),e.el_.style.maxHeight=n+"px"})}var r=function e(t){var n=t.target;if(!(n instanceof HTMLElement))throw new ReferenceError;n.removeAttribute("data-md-state"),n.style.maxHeight="",n.removeEventListener("transitionend",e)};this.el_.addEventListener("transitionend",r,!1)},e.prototype.reset=function(){this.el_.dataset.mdState="",this.el_.style.maxHeight=""},e}();t.default=i},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t){r(this,e);var n="string"==typeof t?document.querySelector(t):t;if(!(n instanceof HTMLElement))throw new ReferenceError;this.el_=n}return e.prototype.setup=function(){this.el_.children[this.el_.children.length-1].style.webkitOverflowScrolling="touch";var 
e=this.el_.querySelectorAll("[data-md-toggle]");Array.prototype.forEach.call(e,function(e){if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=e.nextElementSibling;if(!(t instanceof HTMLElement))throw new ReferenceError;for(;"NAV"!==t.tagName&&t.nextElementSibling;)t=t.nextElementSibling;if(!(e.parentNode instanceof HTMLElement&&e.parentNode.parentNode instanceof HTMLElement))throw new ReferenceError;var n=e.parentNode.parentNode,r=t.children[t.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling="touch"}})},e.prototype.update=function(e){var t=e.target;if(!(t instanceof HTMLElement))throw new ReferenceError;var n=t.nextElementSibling;if(!(n instanceof HTMLElement))throw new ReferenceError;for(;"NAV"!==n.tagName&&n.nextElementSibling;)n=n.nextElementSibling;if(!(t.parentNode instanceof HTMLElement&&t.parentNode.parentNode instanceof HTMLElement))throw new ReferenceError;var r=t.parentNode.parentNode,i=n.children[n.children.length-1];if(r.style.webkitOverflowScrolling="",i.style.webkitOverflowScrolling="",!t.checked){var o=function e(){n instanceof HTMLElement&&(r.style.webkitOverflowScrolling="touch",n.removeEventListener("transitionend",e))};n.addEventListener("transitionend",o,!1)}if(t.checked){var a=function e(){n instanceof HTMLElement&&(i.style.webkitOverflowScrolling="touch",n.removeEventListener("transitionend",e))};n.addEventListener("transitionend",a,!1)}},e.prototype.reset=function(){this.el_.children[1].style.webkitOverflowScrolling="";var e=this.el_.querySelectorAll("[data-md-toggle]");Array.prototype.forEach.call(e,function(e){if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=e.nextElementSibling;if(!(t instanceof HTMLElement))throw new ReferenceError;for(;"NAV"!==t.tagName&&t.nextElementSibling;)t=t.nextElementSibling;if(!(e.parentNode instanceof HTMLElement&&e.parentNode.parentNode instanceof HTMLElement))throw new ReferenceError;var n=e.parentNode.parentNode,r=t.children[t.children.length-1];n.style.webkitOverflowScrolling="",r.style.webkitOverflowScrolling=""}})},e}();t.default=i},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(38),o=r(i),a=n(39),s=r(a);t.default={Lock:o.default,Result:s.default}},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t){r(this,e);var n="string"==typeof t?document.querySelector(t):t;if(!(n instanceof HTMLInputElement))throw new ReferenceError;if(this.el_=n,!document.body)throw new ReferenceError;this.lock_=document.body}return e.prototype.setup=function(){this.update()},e.prototype.update=function(){var e=this;this.el_.checked?(this.offset_=window.pageYOffset,setTimeout(function(){window.scrollTo(0,0),e.el_.checked&&(e.lock_.dataset.mdState="lock")},400)):(this.lock_.dataset.mdState="",setTimeout(function(){void 0!==e.offset_&&window.scrollTo(0,e.offset_)},100))},e.prototype.reset=function(){"lock"===this.lock_.dataset.mdState&&window.scrollTo(0,this.offset_),this.lock_.dataset.mdState=""},e}();t.default=i},function(e,t,n){"use strict";(function(e){function r(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var o=n(40),a=r(o),s=n(41),c=r(s),u=function(e,t){var n=t;if(e.length>n){for(;" "!==e[n]&&--n>0;);return e.substring(0,n)+"..."}return e},l=function(e){var 
t=document.getElementsByName("lang:"+e)[0];if(!(t instanceof HTMLMetaElement))throw new ReferenceError;return t.content},d=function(){function t(e,n){i(this,t);var r="string"==typeof e?document.querySelector(e):e;if(!(r instanceof HTMLElement))throw new ReferenceError;this.el_=r;var o=Array.prototype.slice.call(this.el_.children),a=o[0],s=o[1];this.data_=n,this.meta_=a,this.list_=s,this.message_={placeholder:this.meta_.textContent,none:l("search.result.none"),one:l("search.result.one"),other:l("search.result.other")};var u=l("search.tokenizer");u.length&&(c.default.tokenizer.separator=u),this.lang_=l("search.language").split(",").filter(Boolean).map(function(e){return e.trim()})}return t.prototype.update=function(t){var n=this;if("focus"!==t.type||this.index_){if("focus"===t.type||"keyup"===t.type){var r=t.target;if(!(r instanceof HTMLInputElement))throw new ReferenceError;if(!this.index_||r.value===this.value_)return;for(;this.list_.firstChild;)this.list_.removeChild(this.list_.firstChild);if(this.value_=r.value,0===this.value_.length)return void(this.meta_.textContent=this.message_.placeholder);var i=this.index_.query(function(e){n.value_.toLowerCase().split(" ").filter(Boolean).forEach(function(t){e.term(t,{wildcard:c.default.Query.wildcard.TRAILING})})}).reduce(function(e,t){var r=n.docs_.get(t.ref);if(r.parent){var i=r.parent.location;e.set(i,(e.get(i)||[]).concat(t))}else{var o=r.location;e.set(o,e.get(o)||[])}return e},new Map),o=(0,a.default)(this.value_.trim()).replace(new RegExp(c.default.tokenizer.separator,"img"),"|"),s=new RegExp("(^|"+c.default.tokenizer.separator+")("+o+")","img"),l=function(e,t,n){return t+""+n+""};this.stack_=[],i.forEach(function(t,r){var i,o=n.docs_.get(r),a=e.createElement("li",{class:"md-search-result__item"},e.createElement("a",{href:o.location,title:o.title,class:"md-search-result__link"},e.createElement("article",{class:"md-search-result__article md-search-result__article--document"},e.createElement("h1",{class:"md-search-result__title"},{__html:o.title.replace(s,l)}),o.text.length?e.createElement("p",{class:"md-search-result__teaser"},{__html:o.text.replace(s,l)}):{}))),c=t.map(function(t){return function(){var r=n.docs_.get(t.ref);a.appendChild(e.createElement("a",{href:r.location,title:r.title,class:"md-search-result__link","data-md-rel":"anchor"},e.createElement("article",{class:"md-search-result__article"},e.createElement("h1",{class:"md-search-result__title"},{__html:r.title.replace(s,l)}),r.text.length?e.createElement("p",{class:"md-search-result__teaser"},{__html:u(r.text.replace(s,l),400)}):{})))}});(i=n.stack_).push.apply(i,[function(){return n.list_.appendChild(a)}].concat(c))});var d=this.el_.parentNode;if(!(d instanceof HTMLElement))throw new ReferenceError;for(;this.stack_.length&&d.offsetHeight>=d.scrollHeight-16;)this.stack_.shift()();var f=this.list_.querySelectorAll("[data-md-rel=anchor]");switch(Array.prototype.forEach.call(f,function(e){e.addEventListener("click",function(t){var n=document.querySelector("[data-md-toggle=search]");if(!(n instanceof HTMLInputElement))throw new ReferenceError;n.checked&&(n.checked=!1,n.dispatchEvent(new CustomEvent("change"))),t.preventDefault(),setTimeout(function(){document.location.href=e.href},100)})}),i.size){case 0:this.meta_.textContent=this.message_.none;break;case 1:this.meta_.textContent=this.message_.one;break;default:this.meta_.textContent=this.message_.other.replace("#",i.size)}}}else{var h=function(e){n.docs_=e.reduce(function(e,t){var n=t.location.split("#"),r=n[0];return 
n[1]&&(t.parent=e.get(r),t.parent&&!t.parent.done&&(t.parent.title=t.title,t.parent.text=t.text,t.parent.done=!0)),t.text=t.text.replace(/\n/g," ").replace(/\s+/g," ").replace(/\s+([,.:;!?])/g,function(e,t){return t}),t.parent&&t.parent.title===t.title||e.set(t.location,t),e},new Map);var t=n.docs_,r=n.lang_;n.stack_=[],n.index_=(0,c.default)(function(){var e=this;this.pipeline.reset(),this.pipeline.add(c.default.trimmer,c.default.stopWordFilter),1===r.length&&"en"!==r[0]?this.use(c.default[r[0]]):r.length>1&&this.use(c.default.multiLanguage.apply(c.default,r)),this.field("title",{boost:10}),this.field("text"),this.ref("location"),t.forEach(function(t){return e.add(t)})});var i=n.el_.parentNode;if(!(i instanceof HTMLElement))throw new ReferenceError;i.addEventListener("scroll",function(){for(;n.stack_.length&&i.scrollTop+i.offsetHeight>=i.scrollHeight-16;)n.stack_.splice(0,10).forEach(function(e){return e()})})};setTimeout(function(){return"function"==typeof n.data_?n.data_().then(h):h(n.data_)},250)}},t}();t.default=d}).call(t,n(0))},function(e,t,n){"use strict";var r=/[|\\{}()[\]^$+*?.]/g;e.exports=function(e){if("string"!=typeof e)throw new TypeError("Expected a string");return e.replace(r,"\\$&")}},function(e,t,n){(function(t){e.exports=t.lunr=n(42)}).call(t,n(2))},function(e,t,n){var r,i;!function(){var o=function(e){var t=new o.Builder;return t.pipeline.add(o.trimmer,o.stopWordFilter,o.stemmer),t.searchPipeline.add(o.stemmer),e.call(t,t),t.build()};o.version="2.1.4",o.utils={},o.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),o.utils.asString=function(e){return void 0===e||null===e?"":e.toString()},o.FieldRef=function(e,t,n){this.docRef=e,this.fieldName=t,this._stringValue=n},o.FieldRef.joiner="/",o.FieldRef.fromString=function(e){var t=e.indexOf(o.FieldRef.joiner);if(-1===t)throw"malformed field ref string";var n=e.slice(0,t),r=e.slice(t+1);return new o.FieldRef(r,n,e)},o.FieldRef.prototype.toString=function(){return void 0==this._stringValue&&(this._stringValue=this.fieldName+o.FieldRef.joiner+this.docRef),this._stringValue},o.idf=function(e,t){var n=0;for(var r in e)"_index"!=r&&(n+=Object.keys(e[r]).length);var i=(t-n+.5)/(n+.5);return Math.log(1+Math.abs(i))},o.Token=function(e,t){this.str=e||"",this.metadata=t||{}},o.Token.prototype.toString=function(){return this.str},o.Token.prototype.update=function(e){return this.str=e(this.str,this.metadata),this},o.Token.prototype.clone=function(e){return e=e||function(e){return e},new o.Token(e(this.str,this.metadata),this.metadata)},o.tokenizer=function(e){if(null==e||void 0==e)return[];if(Array.isArray(e))return e.map(function(e){return new o.Token(o.utils.asString(e).toLowerCase())});for(var t=e.toString().trim().toLowerCase(),n=t.length,r=[],i=0,a=0;i<=n;i++){var s=t.charAt(i),c=i-a;(s.match(o.tokenizer.separator)||i==n)&&(c>0&&r.push(new o.Token(t.slice(a,i),{position:[a,c],index:r.length})),a=i+1)}return r},o.tokenizer.separator=/[\s\-]+/,o.Pipeline=function(){this._stack=[]},o.Pipeline.registeredFunctions=Object.create(null),o.Pipeline.registerFunction=function(e,t){t in this.registeredFunctions&&o.utils.warn("Overwriting existing registered function: "+t),e.label=t,o.Pipeline.registeredFunctions[e.label]=e},o.Pipeline.warnIfFunctionNotRegistered=function(e){e.label&&e.label in this.registeredFunctions||o.utils.warn("Function is not registered with pipeline. 
This may cause problems when serialising the index.\n",e)},o.Pipeline.load=function(e){var t=new o.Pipeline;return e.forEach(function(e){var n=o.Pipeline.registeredFunctions[e];if(!n)throw new Error("Cannot load unregistered function: "+e);t.add(n)}),t},o.Pipeline.prototype.add=function(){Array.prototype.slice.call(arguments).forEach(function(e){o.Pipeline.warnIfFunctionNotRegistered(e),this._stack.push(e)},this)},o.Pipeline.prototype.after=function(e,t){o.Pipeline.warnIfFunctionNotRegistered(t);var n=this._stack.indexOf(e);if(-1==n)throw new Error("Cannot find existingFn");n+=1,this._stack.splice(n,0,t)},o.Pipeline.prototype.before=function(e,t){o.Pipeline.warnIfFunctionNotRegistered(t);var n=this._stack.indexOf(e);if(-1==n)throw new Error("Cannot find existingFn");this._stack.splice(n,0,t)},o.Pipeline.prototype.remove=function(e){var t=this._stack.indexOf(e);-1!=t&&this._stack.splice(t,1)},o.Pipeline.prototype.run=function(e){for(var t=this._stack.length,n=0;n1&&(oe&&(n=i),o!=e);)r=n-t,i=t+Math.floor(r/2),o=this.elements[2*i];return o==e?2*i:o>e?2*i:os?u+=2:a==s&&(t+=n[c+1]*r[u+1],c+=2,u+=2);return t},o.Vector.prototype.similarity=function(e){return this.dot(e)/(this.magnitude()*e.magnitude())},o.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),t=1,n=0;t0){var a,s=i.str.charAt(0);s in i.node.edges?a=i.node.edges[s]:(a=new o.TokenSet,i.node.edges[s]=a),1==i.str.length?a.final=!0:r.push({node:a,editsRemaining:i.editsRemaining,str:i.str.slice(1)})}if(i.editsRemaining>0&&i.str.length>1){var c,s=i.str.charAt(1);s in i.node.edges?c=i.node.edges[s]:(c=new o.TokenSet,i.node.edges[s]=c),i.str.length<=2?c.final=!0:r.push({node:c,editsRemaining:i.editsRemaining-1,str:i.str.slice(2)})}if(i.editsRemaining>0&&1==i.str.length&&(i.node.final=!0),i.editsRemaining>0&&i.str.length>=1){if("*"in i.node.edges)var u=i.node.edges["*"];else{var u=new o.TokenSet;i.node.edges["*"]=u}1==i.str.length?u.final=!0:r.push({node:u,editsRemaining:i.editsRemaining-1,str:i.str.slice(1)})}if(i.editsRemaining>0){if("*"in i.node.edges)var l=i.node.edges["*"];else{var l=new o.TokenSet;i.node.edges["*"]=l}0==i.str.length?l.final=!0:r.push({node:l,editsRemaining:i.editsRemaining-1,str:i.str})}if(i.editsRemaining>0&&i.str.length>1){var d,f=i.str.charAt(0),h=i.str.charAt(1);h in i.node.edges?d=i.node.edges[h]:(d=new o.TokenSet,i.node.edges[h]=d),1==i.str.length?d.final=!0:r.push({node:d,editsRemaining:i.editsRemaining-1,str:f+i.str.slice(2)})}}return n},o.TokenSet.fromString=function(e){for(var t=new o.TokenSet,n=t,r=!1,i=0,a=e.length;i=e;t--){var n=this.uncheckedNodes[t],r=n.child.toString();r in this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[r]:(n.child._str=r,this.minimizedNodes[r]=n.child),this.uncheckedNodes.pop()}},o.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},o.Index.prototype.search=function(e){return this.query(function(t){new o.QueryParser(e,t).parse()})},o.Index.prototype.query=function(e){var t=new o.Query(this.fields),n=Object.create(null),r=Object.create(null),i=Object.create(null);e.call(t,t);for(var a=0;a1?1:e},o.Builder.prototype.k1=function(e){this._k1=e},o.Builder.prototype.add=function(e){var t=e[this._ref];this.documentCount+=1;for(var n=0;n=this.length)return o.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},o.QueryLexer.prototype.width=function(){return 
this.pos-this.start},o.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},o.QueryLexer.prototype.backup=function(){this.pos-=1},o.QueryLexer.prototype.acceptDigitRun=function(){var e,t;do{e=this.next(),t=e.charCodeAt(0)}while(t>47&&t<58);e!=o.QueryLexer.EOS&&this.backup()},o.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(o.QueryLexer.TERM)),e.ignore(),e.more())return o.QueryLexer.lexText},o.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(o.QueryLexer.EDIT_DISTANCE),o.QueryLexer.lexText},o.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(o.QueryLexer.BOOST),o.QueryLexer.lexText},o.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(o.QueryLexer.TERM)},o.QueryLexer.termSeparator=o.tokenizer.separator,o.QueryLexer.lexText=function(e){for(;;){var t=e.next();if(t==o.QueryLexer.EOS)return o.QueryLexer.lexEOS;if(92!=t.charCodeAt(0)){if(":"==t)return o.QueryLexer.lexField;if("~"==t)return e.backup(),e.width()>0&&e.emit(o.QueryLexer.TERM),o.QueryLexer.lexEditDistance;if("^"==t)return e.backup(),e.width()>0&&e.emit(o.QueryLexer.TERM),o.QueryLexer.lexBoost;if(t.match(o.QueryLexer.termSeparator))return o.QueryLexer.lexTerm}else e.escapeCharacter()}},o.QueryParser=function(e,t){this.lexer=new o.QueryLexer(e),this.query=t,this.currentClause={},this.lexemeIdx=0},o.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=o.QueryParser.parseFieldOrTerm;e;)e=e(this);return this.query},o.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},o.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},o.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},o.QueryParser.parseFieldOrTerm=function(e){var t=e.peekLexeme();if(void 0!=t)switch(t.type){case o.QueryLexer.FIELD:return o.QueryParser.parseField;case o.QueryLexer.TERM:return o.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+t.type;throw t.str.length>=1&&(n+=" with value '"+t.str+"'"),new o.QueryParseError(n,t.start,t.end)}},o.QueryParser.parseField=function(e){var t=e.consumeLexeme();if(void 0!=t){if(-1==e.query.allFields.indexOf(t.str)){var n=e.query.allFields.map(function(e){return"'"+e+"'"}).join(", "),r="unrecognised field '"+t.str+"', possible fields: "+n;throw new o.QueryParseError(r,t.start,t.end)}e.currentClause.fields=[t.str];var i=e.peekLexeme();if(void 0==i){var r="expecting term, found nothing";throw new o.QueryParseError(r,t.start,t.end)}switch(i.type){case o.QueryLexer.TERM:return o.QueryParser.parseTerm;default:var r="expecting term, found '"+i.type+"'";throw new o.QueryParseError(r,i.start,i.end)}}},o.QueryParser.parseTerm=function(e){var t=e.consumeLexeme();if(void 0!=t){e.currentClause.term=t.str.toLowerCase(),-1!=t.str.indexOf("*")&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(void 0==n)return void e.nextClause();switch(n.type){case o.QueryLexer.TERM:return e.nextClause(),o.QueryParser.parseTerm;case o.QueryLexer.FIELD:return e.nextClause(),o.QueryParser.parseField;case o.QueryLexer.EDIT_DISTANCE:return o.QueryParser.parseEditDistance;case o.QueryLexer.BOOST:return o.QueryParser.parseBoost;default:var r="Unexpected lexeme type '"+n.type+"'";throw new o.QueryParseError(r,n.start,n.end)}}},o.QueryParser.parseEditDistance=function(e){var t=e.consumeLexeme();if(void 0!=t){var 
n=parseInt(t.str,10);if(isNaN(n)){var r="edit distance must be numeric";throw new o.QueryParseError(r,t.start,t.end)}e.currentClause.editDistance=n;var i=e.peekLexeme();if(void 0==i)return void e.nextClause();switch(i.type){case o.QueryLexer.TERM:return e.nextClause(),o.QueryParser.parseTerm;case o.QueryLexer.FIELD:return e.nextClause(),o.QueryParser.parseField;case o.QueryLexer.EDIT_DISTANCE:return o.QueryParser.parseEditDistance;case o.QueryLexer.BOOST:return o.QueryParser.parseBoost;default:var r="Unexpected lexeme type '"+i.type+"'";throw new o.QueryParseError(r,i.start,i.end)}}},o.QueryParser.parseBoost=function(e){var t=e.consumeLexeme();if(void 0!=t){var n=parseInt(t.str,10);if(isNaN(n)){var r="boost must be numeric";throw new o.QueryParseError(r,t.start,t.end)}e.currentClause.boost=n;var i=e.peekLexeme();if(void 0==i)return void e.nextClause();switch(i.type){case o.QueryLexer.TERM:return e.nextClause(),o.QueryParser.parseTerm;case o.QueryLexer.FIELD:return e.nextClause(),o.QueryParser.parseField;case o.QueryLexer.EDIT_DISTANCE:return o.QueryParser.parseEditDistance;case o.QueryLexer.BOOST:return o.QueryParser.parseBoost;default:var r="Unexpected lexeme type '"+i.type+"'";throw new o.QueryParseError(r,i.start,i.end)}}},function(o,a){r=a,void 0!==(i="function"==typeof r?r.call(t,n,t,e):r)&&(e.exports=i)}(0,function(){return o})}()},function(e,t,n){"use strict";t.__esModule=!0;var r=n(44),i=function(e){return e&&e.__esModule?e:{default:e}}(r);t.default={Position:i.default}},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=function(){function e(t,n){r(this,e);var i="string"==typeof t?document.querySelector(t):t;if(!(i instanceof HTMLElement&&i.parentNode instanceof HTMLElement))throw new ReferenceError;if(this.el_=i,this.parent_=i.parentNode,!((i="string"==typeof n?document.querySelector(n):n)instanceof HTMLElement))throw new ReferenceError;this.header_=i,this.height_=0,this.pad_="fixed"===window.getComputedStyle(this.header_).position}return e.prototype.setup=function(){var e=Array.prototype.reduce.call(this.parent_.children,function(e,t){return Math.max(e,t.offsetTop)},0);this.offset_=e-(this.pad_?this.header_.offsetHeight:0),this.update()},e.prototype.update=function(e){var t=window.pageYOffset,n=window.innerHeight;e&&"resize"===e.type&&this.setup();var r={top:this.pad_?this.header_.offsetHeight:0,bottom:this.parent_.offsetTop+this.parent_.offsetHeight},i=n-r.top-Math.max(0,this.offset_-t)-Math.max(0,t+n-r.bottom);i!==this.height_&&(this.el_.style.height=(this.height_=i)+"px"),t>=this.offset_?"lock"!==this.el_.dataset.mdState&&(this.el_.dataset.mdState="lock"):"lock"===this.el_.dataset.mdState&&(this.el_.dataset.mdState="")},e.prototype.reset=function(){this.el_.dataset.mdState="",this.el_.style.height="",this.height_=0},e}();t.default=i},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(46),o=r(i),a=n(50),s=r(a);t.default={Adapter:o.default,Repository:s.default}},function(e,t,n){"use strict";t.__esModule=!0;var r=n(47),i=function(e){return e&&e.__esModule?e:{default:e}}(r);t.default={GitHub:i.default}},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function o(e,t){if("function"!=typeof 
t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var a=n(48),s=function(e){return e&&e.__esModule?e:{default:e}}(a),c=function(e){function t(n){r(this,t);var o=i(this,e.call(this,n)),a=/^.+github\.com\/([^\/]+)\/?([^\/]+)?.*$/.exec(o.base_);if(a&&3===a.length){var s=a[1],c=a[2];o.base_="https://api.github.com/users/"+s+"/repos",o.name_=c}return o}return o(t,e),t.prototype.fetch_=function(){var e=this;return function t(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0;return fetch(e.base_+"?per_page=30&page="+n).then(function(e){return e.json()}).then(function(r){if(!(r instanceof Array))throw new TypeError;if(e.name_){var i=r.find(function(t){return t.name===e.name_});return i||30!==r.length?i?[e.format_(i.stargazers_count)+" Stars",e.format_(i.forks_count)+" Forks"]:[]:t(n+1)}return[r.length+" Repositories"]})}()},t}(s.default);t.default=c},function(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var i=n(49),o=function(e){return e&&e.__esModule?e:{default:e}}(i),a=function(){function e(t){r(this,e);var n="string"==typeof t?document.querySelector(t):t;if(!(n instanceof HTMLAnchorElement))throw new ReferenceError;this.el_=n,this.base_=this.el_.href,this.salt_=this.hash_(this.base_)}return e.prototype.fetch=function(){var e=this;return new Promise(function(t){var n=o.default.getJSON(e.salt_+".cache-source");void 0!==n?t(n):e.fetch_().then(function(n){o.default.set(e.salt_+".cache-source",n,{expires:1/96}),t(n)})})},e.prototype.fetch_=function(){throw new Error("fetch_(): Not implemented")},e.prototype.format_=function(e){return e>1e4?(e/1e3).toFixed(0)+"k":e>1e3?(e/1e3).toFixed(1)+"k":""+e},e.prototype.hash_=function(e){var t=0;if(0===e.length)return t;for(var n=0,r=e.length;n1){if(o=e({path:"/"},r.defaults,o),"number"==typeof o.expires){var s=new Date;s.setMilliseconds(s.getMilliseconds()+864e5*o.expires),o.expires=s}o.expires=o.expires?o.expires.toUTCString():"";try{a=JSON.stringify(i),/^[\{\[]/.test(a)&&(i=a)}catch(e){}i=n.write?n.write(i,t):encodeURIComponent(String(i)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var c="";for(var u in o)o[u]&&(c+="; "+u,!0!==o[u]&&(c+="="+o[u]));return document.cookie=t+"="+i+c}t||(a={});for(var l=document.cookie?document.cookie.split("; "):[],d=/(%[0-9A-Z]{2})+/g,f=0;f=this.el_.children[0].offsetTop+-43;e!==this.active_&&(this.el_.dataset.mdState=(this.active_=e)?"hidden":"")},e.prototype.reset=function(){this.el_.dataset.mdState="",this.active_=!1},e}();t.default=i}])); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.da.js b/docs/assets/javascripts/lunr/lunr.da.js new file mode 100644 index 000000000..215dbcc1f --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.da.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.da=function(){this.pipeline.reset(),this.pipeline.add(e.da.trimmer,e.da.stopWordFilter,e.da.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.da.stemmer))},e.da.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.da.trimmer=e.trimmerSupport.generateTrimmer(e.da.wordCharacters),e.Pipeline.registerFunction(e.da.trimmer,"trimmer-da"),e.da.stemmer=function(){var r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){function e(){var e,r=l.limit-l.cursor;l.cursor>=t&&(e=l.limit_backward,l.limit_backward=t,l.ket=l.cursor,l.find_among_b(a,4)?(l.bra=l.cursor,l.limit_backward=e,l.cursor=l.limit-r,l.cursor>l.limit_backward&&(l.cursor--,l.bra=l.cursor,l.slice_del())):l.limit_backward=e)}var n,t,s,o=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],a=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],d=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],u=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],c=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],l=new i;this.setCurrent=function(e){l.setCurrent(e)},this.getCurrent=function(){return l.getCurrent()},this.stem=function(){var r=l.cursor;return function(){var e,r=l.cursor+3;if(t=l.limit,0<=r&&r<=l.limit){for(n=r;;){if(e=l.cursor,l.in_grouping(u,97,248)){l.cursor=e;break}if(l.cursor=e,e>=l.limit)return;l.cursor++}for(;!l.out_grouping(u,97,248);){if(l.cursor>=l.limit)return;l.cursor++}(t=l.cursor)=t&&(r=l.limit_backward,l.limit_backward=t,l.ket=l.cursor,e=l.find_among_b(o,32),l.limit_backward=r,e))switch(l.bra=l.cursor,e){case 1:l.slice_del();break;case 2:l.in_grouping_b(c,97,229)&&l.slice_del()}}(),l.cursor=l.limit,e(),l.cursor=l.limit,function(){var r,i,n,s=l.limit-l.cursor;if(l.ket=l.cursor,l.eq_s_b(2,"st")&&(l.bra=l.cursor,l.eq_s_b(2,"ig")&&l.slice_del()),l.cursor=l.limit-s,l.cursor>=t&&(i=l.limit_backward,l.limit_backward=t,l.ket=l.cursor,r=l.find_among_b(d,5),l.limit_backward=i,r))switch(l.bra=l.cursor,r){case 1:l.slice_del(),n=l.limit-l.cursor,e(),l.cursor=l.limit-n;break;case 2:l.slice_from("løs")}}(),l.cursor=l.limit,function(){var e;l.cursor>=t&&(e=l.limit_backward,l.limit_backward=t,l.ket=l.cursor,l.out_grouping_b(u,97,248)?(l.bra=l.cursor,s=l.slice_to(s),l.limit_backward=e,l.eq_v_b(s)&&l.slice_del()):l.limit_backward=e)}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget 
nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.de.js b/docs/assets/javascripts/lunr/lunr.de.js new file mode 100644 index 000000000..576a21923 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.de.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.de=function(){this.pipeline.reset(),this.pipeline.add(e.de.trimmer,e.de.stopWordFilter,e.de.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.de.stemmer))},e.de.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.de.trimmer=e.trimmerSupport.generateTrimmer(e.de.wordCharacters),e.Pipeline.registerFunction(e.de.trimmer,"trimmer-de"),e.de.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!_.eq_s(1,e)||(_.ket=_.cursor,!_.in_grouping(w,97,252)))&&(_.slice_from(r),_.cursor=n,!0)}function i(){for(;!_.in_grouping(w,97,252);){if(_.cursor>=_.limit)return!0;_.cursor++}for(;!_.out_grouping(w,97,252);){if(_.cursor>=_.limit)return!0;_.cursor++}return!1}function s(){return u<=_.cursor}function t(){return c<=_.cursor}var o,c,u,a=[new r("",-1,6),new r("U",0,2),new r("Y",0,1),new r("ä",0,3),new r("ö",0,4),new r("ü",0,5)],d=[new r("e",-1,2),new r("em",-1,1),new r("en",-1,2),new r("ern",-1,1),new r("er",-1,1),new r("s",-1,3),new r("es",5,2)],l=[new r("en",-1,1),new r("er",-1,1),new r("st",-1,2),new r("est",2,1)],m=[new r("ig",-1,1),new r("lich",-1,1)],h=[new r("end",-1,1),new r("ig",-1,2),new r("ung",-1,1),new r("lich",-1,3),new r("isch",-1,2),new r("ik",-1,2),new r("heit",-1,3),new r("keit",-1,4)],w=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32,8],f=[117,30,5],b=[117,30,4],_=new n;this.setCurrent=function(e){_.setCurrent(e)},this.getCurrent=function(){return _.getCurrent()},this.stem=function(){var r=_.cursor;return function(){for(var r,n,i,s,t=_.cursor;;)if(r=_.cursor,_.bra=r,_.eq_s(1,"ß"))_.ket=_.cursor,_.slice_from("ss");else{if(r>=_.limit)break;_.cursor=r+1}for(_.cursor=t;;)for(n=_.cursor;;){if(i=_.cursor,_.in_grouping(w,97,252)){if(s=_.cursor,_.bra=s,e("u","U",i))break;if(_.cursor=s,e("y","Y",i))break}if(i>=_.limit)return void(_.cursor=n);_.cursor=i+1}}(),_.cursor=r,function(){u=_.limit,c=u;var e=_.cursor+3;0<=e&&e<=_.limit&&(o=e,i()||((u=_.cursor)=_.limit)return;_.cursor++}}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.de.stemmer,"stemmer-de"),e.de.stopWordFilter=e.generateStopWordFilter("aber alle allem allen aller alles als also am an ander andere anderem anderen anderer anderes anderm andern anderr anders auch auf aus bei bin bis bist da damit dann das dasselbe dazu daß dein deine deinem deinen deiner deines dem demselben den denn denselben der derer derselbe derselben des desselben dessen dich die dies diese dieselbe dieselben 
diesem diesen dieser dieses dir doch dort du durch ein eine einem einen einer eines einig einige einigem einigen einiger einiges einmal er es etwas euch euer eure eurem euren eurer eures für gegen gewesen hab habe haben hat hatte hatten hier hin hinter ich ihm ihn ihnen ihr ihre ihrem ihren ihrer ihres im in indem ins ist jede jedem jeden jeder jedes jene jenem jenen jener jenes jetzt kann kein keine keinem keinen keiner keines können könnte machen man manche manchem manchen mancher manches mein meine meinem meinen meiner meines mich mir mit muss musste nach nicht nichts noch nun nur ob oder ohne sehr sein seine seinem seinen seiner seines selbst sich sie sind so solche solchem solchen solcher solches soll sollte sondern sonst um und uns unse unsem unsen unser unses unter viel vom von vor war waren warst was weg weil weiter welche welchem welchen welcher welches wenn werde werden wie wieder will wir wird wirst wo wollen wollte während würde würden zu zum zur zwar zwischen über".split(" ")),e.Pipeline.registerFunction(e.de.stopWordFilter,"stopWordFilter-de")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.du.js b/docs/assets/javascripts/lunr/lunr.du.js new file mode 100644 index 000000000..c317652d6 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.du.js @@ -0,0 +1 @@ +!function(r,e){"function"==typeof define&&define.amd?define(e):"object"==typeof exports?module.exports=e():e()(r.lunr)}(this,function(){return function(r){if(void 0===r)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");r.du=function(){this.pipeline.reset(),this.pipeline.add(r.du.trimmer,r.du.stopWordFilter,r.du.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(r.du.stemmer))},r.du.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",r.du.trimmer=r.trimmerSupport.generateTrimmer(r.du.wordCharacters),r.Pipeline.registerFunction(r.du.trimmer,"trimmer-du"),r.du.stemmer=function(){var e=r.stemmerSupport.Among,i=r.stemmerSupport.SnowballProgram,n=new function(){function r(r){return v.cursor=r,r>=v.limit||(v.cursor++,!1)}function n(){for(;!v.in_grouping(g,97,232);){if(v.cursor>=v.limit)return!0;v.cursor++}for(;!v.out_grouping(g,97,232);){if(v.cursor>=v.limit)return!0;v.cursor++}return!1}function o(){return l<=v.cursor}function t(){return a<=v.cursor}function s(){var r=v.limit-v.cursor;v.find_among_b(_,3)&&(v.cursor=v.limit-r,v.ket=v.cursor,v.cursor>v.limit_backward&&(v.cursor--,v.bra=v.cursor,v.slice_del()))}function u(){var r;m=!1,v.ket=v.cursor,v.eq_s_b(1,"e")&&(v.bra=v.cursor,o()&&(r=v.limit-v.cursor,v.out_grouping_b(g,97,232)&&(v.cursor=v.limit-r,v.slice_del(),m=!0,s())))}function c(){var r;o()&&(r=v.limit-v.cursor,v.out_grouping_b(g,97,232)&&(v.cursor=v.limit-r,v.eq_s_b(3,"gem")||(v.cursor=v.limit-r,v.slice_del(),s())))}var a,l,m,d=[new e("",-1,6),new e("á",0,1),new e("ä",0,1),new e("é",0,2),new e("ë",0,2),new e("í",0,3),new e("ï",0,3),new e("ó",0,4),new e("ö",0,4),new e("ú",0,5),new e("ü",0,5)],f=[new e("",-1,3),new e("I",0,2),new e("Y",0,1)],_=[new e("dd",-1,-1),new e("kk",-1,-1),new e("tt",-1,-1)],w=[new e("ene",-1,2),new e("se",-1,3),new e("en",-1,2),new e("heden",2,1),new e("s",-1,3)],b=[new e("end",-1,1),new e("ig",-1,2),new e("ing",-1,1),new e("lijk",-1,3),new e("baar",-1,4),new e("bar",-1,5)],p=[new e("aa",-1,-1),new 
e("ee",-1,-1),new e("oo",-1,-1),new e("uu",-1,-1)],g=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],h=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],k=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],v=new i;this.setCurrent=function(r){v.setCurrent(r)},this.getCurrent=function(){return v.getCurrent()},this.stem=function(){var e=v.cursor;return function(){for(var e,i,n,o=v.cursor;;){if(v.bra=v.cursor,e=v.find_among(d,11))switch(v.ket=v.cursor,e){case 1:v.slice_from("a");continue;case 2:v.slice_from("e");continue;case 3:v.slice_from("i");continue;case 4:v.slice_from("o");continue;case 5:v.slice_from("u");continue;case 6:if(v.cursor>=v.limit)break;v.cursor++;continue}break}for(v.cursor=o,v.bra=o,v.eq_s(1,"y")?(v.ket=v.cursor,v.slice_from("Y")):v.cursor=o;;)if(i=v.cursor,v.in_grouping(g,97,232)){if(n=v.cursor,v.bra=n,v.eq_s(1,"i"))v.ket=v.cursor,v.in_grouping(g,97,232)&&(v.slice_from("I"),v.cursor=i);else if(v.cursor=n,v.eq_s(1,"y"))v.ket=v.cursor,v.slice_from("Y"),v.cursor=i;else if(r(i))break}else if(r(i))break}(),v.cursor=e,l=v.limit,a=l,n()||((l=v.cursor)<3&&(l=3),n()||(a=v.cursor)),v.limit_backward=e,v.cursor=v.limit,function(){var r,e,i,n,a,l,d=v.limit-v.cursor;if(v.ket=v.cursor,r=v.find_among_b(w,5))switch(v.bra=v.cursor,r){case 1:o()&&v.slice_from("heid");break;case 2:c();break;case 3:o()&&v.out_grouping_b(k,97,232)&&v.slice_del()}if(v.cursor=v.limit-d,u(),v.cursor=v.limit-d,v.ket=v.cursor,v.eq_s_b(4,"heid")&&(v.bra=v.cursor,t()&&(e=v.limit-v.cursor,v.eq_s_b(1,"c")||(v.cursor=v.limit-e,v.slice_del(),v.ket=v.cursor,v.eq_s_b(2,"en")&&(v.bra=v.cursor,c())))),v.cursor=v.limit-d,v.ket=v.cursor,r=v.find_among_b(b,6))switch(v.bra=v.cursor,r){case 1:if(t()){if(v.slice_del(),i=v.limit-v.cursor,v.ket=v.cursor,v.eq_s_b(2,"ig")&&(v.bra=v.cursor,t()&&(n=v.limit-v.cursor,!v.eq_s_b(1,"e")))){v.cursor=v.limit-n,v.slice_del();break}v.cursor=v.limit-i,s()}break;case 2:t()&&(a=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-a,v.slice_del()));break;case 3:t()&&(v.slice_del(),u());break;case 4:t()&&v.slice_del();break;case 5:t()&&m&&v.slice_del()}v.cursor=v.limit-d,v.out_grouping_b(h,73,232)&&(l=v.limit-v.cursor,v.find_among_b(p,4)&&v.out_grouping_b(g,97,232)&&(v.cursor=v.limit-l,v.ket=v.cursor,v.cursor>v.limit_backward&&(v.cursor--,v.bra=v.cursor,v.slice_del())))}(),v.cursor=v.limit_backward,function(){for(var r;;)if(v.bra=v.cursor,r=v.find_among(f,3))switch(v.ket=v.cursor,r){case 1:v.slice_from("y");break;case 2:v.slice_from("i");break;case 3:if(v.cursor>=v.limit)return;v.cursor++}}(),!0}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.du.stemmer,"stemmer-du"),r.du.stopWordFilter=r.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),r.Pipeline.registerFunction(r.du.stopWordFilter,"stopWordFilter-du")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.es.js b/docs/assets/javascripts/lunr/lunr.es.js new file mode 100644 index 000000000..5098feba4 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.es.js @@ 
-0,0 +1 @@ +!function(e,s){"function"==typeof define&&define.amd?define(s):"object"==typeof exports?module.exports=s():s()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.es=function(){this.pipeline.reset(),this.pipeline.add(e.es.trimmer,e.es.stopWordFilter,e.es.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.es.stemmer))},e.es.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.es.trimmer=e.trimmerSupport.generateTrimmer(e.es.wordCharacters),e.Pipeline.registerFunction(e.es.trimmer,"trimmer-es"),e.es.stemmer=function(){var s=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(){if(C.out_grouping(q,97,252)){for(;!C.in_grouping(q,97,252);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}return!0}function n(){var s,r=C.cursor;if(function(){if(C.in_grouping(q,97,252)){var s=C.cursor;if(e()){if(C.cursor=s,!C.in_grouping(q,97,252))return!0;for(;!C.out_grouping(q,97,252);){if(C.cursor>=C.limit)return!0;C.cursor++}}return!1}return!0}()){if(C.cursor=r,!C.out_grouping(q,97,252))return;if(s=C.cursor,e()){if(C.cursor=s,!C.in_grouping(q,97,252)||C.cursor>=C.limit)return;C.cursor++}}l=C.cursor}function i(){for(;!C.in_grouping(q,97,252);){if(C.cursor>=C.limit)return!1;C.cursor++}for(;!C.out_grouping(q,97,252);){if(C.cursor>=C.limit)return!1;C.cursor++}return!0}function a(){return l<=C.cursor}function t(){return c<=C.cursor}function o(e,s){if(!t())return!0;C.slice_del(),C.ket=C.cursor;var r=C.find_among_b(e,s);return r&&(C.bra=C.cursor,1==r&&t()&&C.slice_del()),!1}function u(e){return!t()||(C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,e)&&(C.bra=C.cursor,t()&&C.slice_del()),!1)}function w(){var e;if(C.ket=C.cursor,e=C.find_among_b(p,46)){switch(C.bra=C.cursor,e){case 1:if(!t())return!1;C.slice_del();break;case 2:if(u("ic"))return!1;break;case 3:if(!t())return!1;C.slice_from("log");break;case 4:if(!t())return!1;C.slice_from("u");break;case 5:if(!t())return!1;C.slice_from("ente");break;case 6:if(!(m<=C.cursor))return!1;C.slice_del(),C.ket=C.cursor,(e=C.find_among_b(_,4))&&(C.bra=C.cursor,t()&&(C.slice_del(),1==e&&(C.ket=C.cursor,C.eq_s_b(2,"at")&&(C.bra=C.cursor,t()&&C.slice_del()))));break;case 7:if(o(h,3))return!1;break;case 8:if(o(v,3))return!1;break;case 9:if(u("at"))return!1}return!0}return!1}var c,m,l,d=[new s("",-1,6),new s("á",0,1),new s("é",0,2),new s("í",0,3),new s("ó",0,4),new s("ú",0,5)],b=[new s("la",-1,-1),new s("sela",0,-1),new s("le",-1,-1),new s("me",-1,-1),new s("se",-1,-1),new s("lo",-1,-1),new s("selo",5,-1),new s("las",-1,-1),new s("selas",7,-1),new s("les",-1,-1),new s("los",-1,-1),new s("selos",10,-1),new s("nos",-1,-1)],f=[new s("ando",-1,6),new s("iendo",-1,6),new s("yendo",-1,7),new s("ándo",-1,2),new s("iéndo",-1,1),new s("ar",-1,6),new s("er",-1,6),new s("ir",-1,6),new s("ár",-1,3),new s("ér",-1,4),new s("ír",-1,5)],_=[new s("ic",-1,-1),new s("ad",-1,-1),new s("os",-1,-1),new s("iv",-1,1)],h=[new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,1)],v=[new s("ic",-1,1),new s("abil",-1,1),new s("iv",-1,1)],p=[new s("ica",-1,1),new s("ancia",-1,2),new s("encia",-1,5),new s("adora",-1,2),new s("osa",-1,1),new s("ista",-1,1),new s("iva",-1,9),new s("anza",-1,1),new s("logía",-1,3),new s("idad",-1,8),new 
s("able",-1,1),new s("ible",-1,1),new s("ante",-1,2),new s("mente",-1,7),new s("amente",13,6),new s("ación",-1,2),new s("ución",-1,4),new s("ico",-1,1),new s("ismo",-1,1),new s("oso",-1,1),new s("amiento",-1,1),new s("imiento",-1,1),new s("ivo",-1,9),new s("ador",-1,2),new s("icas",-1,1),new s("ancias",-1,2),new s("encias",-1,5),new s("adoras",-1,2),new s("osas",-1,1),new s("istas",-1,1),new s("ivas",-1,9),new s("anzas",-1,1),new s("logías",-1,3),new s("idades",-1,8),new s("ables",-1,1),new s("ibles",-1,1),new s("aciones",-1,2),new s("uciones",-1,4),new s("adores",-1,2),new s("antes",-1,2),new s("icos",-1,1),new s("ismos",-1,1),new s("osos",-1,1),new s("amientos",-1,1),new s("imientos",-1,1),new s("ivos",-1,9)],g=[new s("ya",-1,1),new s("ye",-1,1),new s("yan",-1,1),new s("yen",-1,1),new s("yeron",-1,1),new s("yendo",-1,1),new s("yo",-1,1),new s("yas",-1,1),new s("yes",-1,1),new s("yais",-1,1),new s("yamos",-1,1),new s("yó",-1,1)],k=[new s("aba",-1,2),new s("ada",-1,2),new s("ida",-1,2),new s("ara",-1,2),new s("iera",-1,2),new s("ía",-1,2),new s("aría",5,2),new s("ería",5,2),new s("iría",5,2),new s("ad",-1,2),new s("ed",-1,2),new s("id",-1,2),new s("ase",-1,2),new s("iese",-1,2),new s("aste",-1,2),new s("iste",-1,2),new s("an",-1,2),new s("aban",16,2),new s("aran",16,2),new s("ieran",16,2),new s("ían",16,2),new s("arían",20,2),new s("erían",20,2),new s("irían",20,2),new s("en",-1,1),new s("asen",24,2),new s("iesen",24,2),new s("aron",-1,2),new s("ieron",-1,2),new s("arán",-1,2),new s("erán",-1,2),new s("irán",-1,2),new s("ado",-1,2),new s("ido",-1,2),new s("ando",-1,2),new s("iendo",-1,2),new s("ar",-1,2),new s("er",-1,2),new s("ir",-1,2),new s("as",-1,2),new s("abas",39,2),new s("adas",39,2),new s("idas",39,2),new s("aras",39,2),new s("ieras",39,2),new s("ías",39,2),new s("arías",45,2),new s("erías",45,2),new s("irías",45,2),new s("es",-1,1),new s("ases",49,2),new s("ieses",49,2),new s("abais",-1,2),new s("arais",-1,2),new s("ierais",-1,2),new s("íais",-1,2),new s("aríais",55,2),new s("eríais",55,2),new s("iríais",55,2),new s("aseis",-1,2),new s("ieseis",-1,2),new s("asteis",-1,2),new s("isteis",-1,2),new s("áis",-1,2),new s("éis",-1,1),new s("aréis",64,2),new s("eréis",64,2),new s("iréis",64,2),new s("ados",-1,2),new s("idos",-1,2),new s("amos",-1,2),new s("ábamos",70,2),new s("áramos",70,2),new s("iéramos",70,2),new s("íamos",70,2),new s("aríamos",74,2),new s("eríamos",74,2),new s("iríamos",74,2),new s("emos",-1,1),new s("aremos",78,2),new s("eremos",78,2),new s("iremos",78,2),new s("ásemos",78,2),new s("iésemos",78,2),new s("imos",-1,2),new s("arás",-1,2),new s("erás",-1,2),new s("irás",-1,2),new s("ís",-1,2),new s("ará",-1,2),new s("erá",-1,2),new s("irá",-1,2),new s("aré",-1,2),new s("eré",-1,2),new s("iré",-1,2),new s("ió",-1,2)],y=[new s("a",-1,1),new s("e",-1,2),new s("o",-1,1),new s("os",-1,1),new s("á",-1,1),new s("é",-1,2),new s("í",-1,1),new s("ó",-1,1)],q=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,4,10],C=new r;this.setCurrent=function(e){C.setCurrent(e)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var e=C.cursor;return function(){var e=C.cursor;l=C.limit,m=l,c=l,n(),C.cursor=e,i()&&(m=C.cursor,i()&&(c=C.cursor))}(),C.limit_backward=e,C.cursor=C.limit,function(){var e;if(C.ket=C.cursor,C.find_among_b(b,13)&&(C.bra=C.cursor,(e=C.find_among_b(f,11))&&a()))switch(e){case 1:C.bra=C.cursor,C.slice_from("iendo");break;case 2:C.bra=C.cursor,C.slice_from("ando");break;case 3:C.bra=C.cursor,C.slice_from("ar");break;case 
4:C.bra=C.cursor,C.slice_from("er");break;case 5:C.bra=C.cursor,C.slice_from("ir");break;case 6:C.slice_del();break;case 7:C.eq_s_b(1,"u")&&C.slice_del()}}(),C.cursor=C.limit,w()||(C.cursor=C.limit,function(){var e,s;if(C.cursor>=l&&(s=C.limit_backward,C.limit_backward=l,C.ket=C.cursor,e=C.find_among_b(g,12),C.limit_backward=s,e)){if(C.bra=C.cursor,1==e){if(!C.eq_s_b(1,"u"))return!1;C.slice_del()}return!0}return!1}()||(C.cursor=C.limit,function(){var e,s,r,n;if(C.cursor>=l&&(s=C.limit_backward,C.limit_backward=l,C.ket=C.cursor,e=C.find_among_b(k,96),C.limit_backward=s,e))switch(C.bra=C.cursor,e){case 1:r=C.limit-C.cursor,C.eq_s_b(1,"u")?(n=C.limit-C.cursor,C.eq_s_b(1,"g")?C.cursor=C.limit-n:C.cursor=C.limit-r):C.cursor=C.limit-r,C.bra=C.cursor;case 2:C.slice_del()}}())),C.cursor=C.limit,function(){var e,s;if(C.ket=C.cursor,e=C.find_among_b(y,8))switch(C.bra=C.cursor,e){case 1:a()&&C.slice_del();break;case 2:a()&&(C.slice_del(),C.ket=C.cursor,C.eq_s_b(1,"u")&&(C.bra=C.cursor,s=C.limit-C.cursor,C.eq_s_b(1,"g")&&(C.cursor=C.limit-s,a()&&C.slice_del())))}}(),C.cursor=C.limit_backward,function(){for(var e;;){if(C.bra=C.cursor,e=C.find_among(d,6))switch(C.ket=C.cursor,e){case 1:C.slice_from("a");continue;case 2:C.slice_from("e");continue;case 3:C.slice_from("i");continue;case 4:C.slice_from("o");continue;case 5:C.slice_from("u");continue;case 6:if(C.cursor>=C.limit)break;C.cursor++;continue}break}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.es.stemmer,"stemmer-es"),e.es.stopWordFilter=e.generateStopWordFilter("a al algo algunas algunos ante antes como con contra cual cuando de del desde donde durante e el ella ellas ellos en entre era erais eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estamos estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos ha habida habidas habido habidos habiendo habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías han has hasta hay haya hayamos hayan hayas hayáis he hemos hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo la las le les lo los me mi mis mucho muchos muy más mí mía mías mío míos nada ni no nos nosotras nosotros nuestra nuestras nuestro nuestros o os otra otras otro otros para pero poco por porque que quien quienes qué se sea seamos sean seas seremos será serán serás seré seréis sería seríais seríamos serían serías seáis sido siendo sin sobre sois somos son soy su sus suya suyas suyo suyos sí también tanto te tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened tenemos tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías ti tiene tienen tienes todo todos tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese 
tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú un una uno unos vosotras vosotros vuestra vuestras vuestro vuestros y ya yo él éramos".split(" ")),e.Pipeline.registerFunction(e.es.stopWordFilter,"stopWordFilter-es")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.fi.js b/docs/assets/javascripts/lunr/lunr.fi.js new file mode 100644 index 000000000..63b494bd3 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.fi.js @@ -0,0 +1 @@ +!function(i,e){"function"==typeof define&&define.amd?define(e):"object"==typeof exports?module.exports=e():e()(i.lunr)}(this,function(){return function(i){if(void 0===i)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===i.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");i.fi=function(){this.pipeline.reset(),this.pipeline.add(i.fi.trimmer,i.fi.stopWordFilter,i.fi.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(i.fi.stemmer))},i.fi.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",i.fi.trimmer=i.trimmerSupport.generateTrimmer(i.fi.wordCharacters),i.Pipeline.registerFunction(i.fi.trimmer,"trimmer-fi"),i.fi.stemmer=function(){var e=i.stemmerSupport.Among,r=i.stemmerSupport.SnowballProgram,n=new function(){function i(){for(var i;;){if(i=C.cursor,C.in_grouping(j,97,246))break;if(C.cursor=i,i>=C.limit)return!0;C.cursor++}for(C.cursor=i;!C.out_grouping(j,97,246);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function n(){var i,e;if(C.cursor>=u)if(e=C.limit_backward,C.limit_backward=u,C.ket=C.cursor,i=C.find_among_b(c,10)){switch(C.bra=C.cursor,C.limit_backward=e,i){case 1:if(!C.in_grouping_b(q,97,246))return;break;case 2:if(!(a<=C.cursor))return}C.slice_del()}else C.limit_backward=e}function t(){return C.find_among_b(b,7)}function s(){return C.eq_s_b(1,"i")&&C.in_grouping_b(v,97,246)}var o,l,a,u,c=[new e("pa",-1,1),new e("sti",-1,2),new e("kaan",-1,1),new e("han",-1,1),new e("kin",-1,1),new e("hän",-1,1),new e("kään",-1,1),new e("ko",-1,1),new e("pä",-1,1),new e("kö",-1,1)],m=[new e("lla",-1,-1),new e("na",-1,-1),new e("ssa",-1,-1),new e("ta",-1,-1),new e("lta",3,-1),new e("sta",3,-1)],w=[new e("llä",-1,-1),new e("nä",-1,-1),new e("ssä",-1,-1),new e("tä",-1,-1),new e("ltä",3,-1),new e("stä",3,-1)],_=[new e("lle",-1,-1),new e("ine",-1,-1)],k=[new e("nsa",-1,3),new e("mme",-1,3),new e("nne",-1,3),new e("ni",-1,2),new e("si",-1,1),new e("an",-1,4),new e("en",-1,6),new e("än",-1,5),new e("nsä",-1,3)],b=[new e("aa",-1,-1),new e("ee",-1,-1),new e("ii",-1,-1),new e("oo",-1,-1),new e("uu",-1,-1),new e("ää",-1,-1),new e("öö",-1,-1)],d=[new e("a",-1,8),new e("lla",0,-1),new e("na",0,-1),new e("ssa",0,-1),new e("ta",0,-1),new e("lta",4,-1),new e("sta",4,-1),new e("tta",4,9),new e("lle",-1,-1),new e("ine",-1,-1),new e("ksi",-1,-1),new e("n",-1,7),new e("han",11,1),new e("den",11,-1,s),new e("seen",11,-1,t),new e("hen",11,2),new e("tten",11,-1,s),new e("hin",11,3),new e("siin",11,-1,s),new e("hon",11,4),new e("hän",11,5),new e("hön",11,6),new e("ä",-1,8),new e("llä",22,-1),new e("nä",22,-1),new e("ssä",22,-1),new e("tä",22,-1),new e("ltä",26,-1),new e("stä",26,-1),new e("ttä",26,9)],f=[new e("eja",-1,-1),new e("mma",-1,1),new e("imma",1,-1),new e("mpa",-1,1),new e("impa",3,-1),new e("mmi",-1,1),new e("immi",5,-1),new e("mpi",-1,1),new e("impi",7,-1),new e("ejä",-1,-1),new 
e("mmä",-1,1),new e("immä",10,-1),new e("mpä",-1,1),new e("impä",12,-1)],h=[new e("i",-1,-1),new e("j",-1,-1)],p=[new e("mma",-1,1),new e("imma",0,-1)],g=[17,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8],j=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],v=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],q=[17,97,24,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],C=new r;this.setCurrent=function(i){C.setCurrent(i)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var e=C.cursor;return u=C.limit,a=u,i()||(u=C.cursor,i()||(a=C.cursor)),o=!1,C.limit_backward=e,C.cursor=C.limit,n(),C.cursor=C.limit,function(){var i,e,r;if(C.cursor>=u)if(e=C.limit_backward,C.limit_backward=u,C.ket=C.cursor,i=C.find_among_b(k,9))switch(C.bra=C.cursor,C.limit_backward=e,i){case 1:r=C.limit-C.cursor,C.eq_s_b(1,"k")||(C.cursor=C.limit-r,C.slice_del());break;case 2:C.slice_del(),C.ket=C.cursor,C.eq_s_b(3,"kse")&&(C.bra=C.cursor,C.slice_from("ksi"));break;case 3:C.slice_del();break;case 4:C.find_among_b(m,6)&&C.slice_del();break;case 5:C.find_among_b(w,6)&&C.slice_del();break;case 6:C.find_among_b(_,2)&&C.slice_del()}else C.limit_backward=e}(),C.cursor=C.limit,function(){var i,e,r;if(C.cursor>=u)if(e=C.limit_backward,C.limit_backward=u,C.ket=C.cursor,i=C.find_among_b(d,30)){switch(C.bra=C.cursor,C.limit_backward=e,i){case 1:if(!C.eq_s_b(1,"a"))return;break;case 2:case 9:if(!C.eq_s_b(1,"e"))return;break;case 3:if(!C.eq_s_b(1,"i"))return;break;case 4:if(!C.eq_s_b(1,"o"))return;break;case 5:if(!C.eq_s_b(1,"ä"))return;break;case 6:if(!C.eq_s_b(1,"ö"))return;break;case 7:if(r=C.limit-C.cursor,!t()&&(C.cursor=C.limit-r,!C.eq_s_b(2,"ie"))){C.cursor=C.limit-r;break}if(C.cursor=C.limit-r,C.cursor<=C.limit_backward){C.cursor=C.limit-r;break}C.cursor--,C.bra=C.cursor;break;case 8:if(!C.in_grouping_b(j,97,246)||!C.out_grouping_b(j,97,246))return}C.slice_del(),o=!0}else C.limit_backward=e}(),C.cursor=C.limit,function(){var i,e,r;if(C.cursor>=a)if(e=C.limit_backward,C.limit_backward=a,C.ket=C.cursor,i=C.find_among_b(f,14)){if(C.bra=C.cursor,C.limit_backward=e,1==i){if(r=C.limit-C.cursor,C.eq_s_b(2,"po"))return;C.cursor=C.limit-r}C.slice_del()}else C.limit_backward=e}(),C.cursor=C.limit,o?(!function(){var i;C.cursor>=u&&(i=C.limit_backward,C.limit_backward=u,C.ket=C.cursor,C.find_among_b(h,2)?(C.bra=C.cursor,C.limit_backward=i,C.slice_del()):C.limit_backward=i)}(),C.cursor=C.limit):(C.cursor=C.limit,function(){var i,e,r,n,t,s;if(C.cursor>=u){if(e=C.limit_backward,C.limit_backward=u,C.ket=C.cursor,C.eq_s_b(1,"t")&&(C.bra=C.cursor,r=C.limit-C.cursor,C.in_grouping_b(j,97,246)&&(C.cursor=C.limit-r,C.slice_del(),C.limit_backward=e,n=C.limit-C.cursor,C.cursor>=a&&(C.cursor=a,t=C.limit_backward,C.limit_backward=C.cursor,C.cursor=C.limit-n,C.ket=C.cursor,i=C.find_among_b(p,2))))){if(C.bra=C.cursor,C.limit_backward=t,1==i){if(s=C.limit-C.cursor,C.eq_s_b(2,"po"))return;C.cursor=C.limit-s}return void C.slice_del()}C.limit_backward=e}}(),C.cursor=C.limit),function(){var 
i,e,r,n;if(C.cursor>=u){for(i=C.limit_backward,C.limit_backward=u,e=C.limit-C.cursor,t()&&(C.cursor=C.limit-e,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())),C.cursor=C.limit-e,C.ket=C.cursor,C.in_grouping_b(g,97,228)&&(C.bra=C.cursor,C.out_grouping_b(j,97,246)&&C.slice_del()),C.cursor=C.limit-e,C.ket=C.cursor,C.eq_s_b(1,"j")&&(C.bra=C.cursor,r=C.limit-C.cursor,C.eq_s_b(1,"o")?C.slice_del():(C.cursor=C.limit-r,C.eq_s_b(1,"u")&&C.slice_del())),C.cursor=C.limit-e,C.ket=C.cursor,C.eq_s_b(1,"o")&&(C.bra=C.cursor,C.eq_s_b(1,"j")&&C.slice_del()),C.cursor=C.limit-e,C.limit_backward=i;;){if(n=C.limit-C.cursor,C.out_grouping_b(j,97,246)){C.cursor=C.limit-n;break}if(C.cursor=C.limit-n,C.cursor<=C.limit_backward)return;C.cursor--}C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,l=C.slice_to(),C.eq_v_b(l)&&C.slice_del())}}(),!0}};return function(i){return"function"==typeof i.update?i.update(function(i){return n.setCurrent(i),n.stem(),n.getCurrent()}):(n.setCurrent(i),n.stem(),n.getCurrent())}}(),i.Pipeline.registerFunction(i.fi.stemmer,"stemmer-fi"),i.fi.stopWordFilter=i.generateStopWordFilter("ei eivät emme en et ette että he heidän heidät heihin heille heillä heiltä heissä heistä heitä hän häneen hänelle hänellä häneltä hänen hänessä hänestä hänet häntä itse ja johon joiden joihin joiksi joilla joille joilta joina joissa joista joita joka joksi jolla jolle jolta jona jonka jos jossa josta jota jotka kanssa keiden keihin keiksi keille keillä keiltä keinä keissä keistä keitä keneen keneksi kenelle kenellä keneltä kenen kenenä kenessä kenestä kenet ketkä ketkä ketä koska kuin kuka kun me meidän meidät meihin meille meillä meiltä meissä meistä meitä mihin miksi mikä mille millä miltä minkä minkä minua minulla minulle minulta minun minussa minusta minut minuun minä minä missä mistä mitkä mitä mukaan mutta ne niiden niihin niiksi niille niillä niiltä niin niin niinä niissä niistä niitä noiden noihin noiksi noilla noille noilta noin noina noissa noista noita nuo nyt näiden näihin näiksi näille näillä näiltä näinä näissä näistä näitä nämä ole olemme olen olet olette oli olimme olin olisi olisimme olisin olisit olisitte olisivat olit olitte olivat olla olleet ollut on ovat poikki se sekä sen siihen siinä siitä siksi sille sillä sillä siltä sinua sinulla sinulle sinulta sinun sinussa sinusta sinut sinuun sinä sinä sitä tai te teidän teidät teihin teille teillä teiltä teissä teistä teitä tuo tuohon tuoksi tuolla tuolle tuolta tuon tuona tuossa tuosta tuota tähän täksi tälle tällä tältä tämä tämän tänä tässä tästä tätä vaan vai vaikka yli".split(" ")),i.Pipeline.registerFunction(i.fi.stopWordFilter,"stopWordFilter-fi")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.fr.js b/docs/assets/javascripts/lunr/lunr.fr.js new file mode 100644 index 000000000..ae9f8cf6b --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.fr.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.fr=function(){this.pipeline.reset(),this.pipeline.add(e.fr.trimmer,e.fr.stopWordFilter,e.fr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.fr.stemmer))},e.fr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.fr.trimmer=e.trimmerSupport.generateTrimmer(e.fr.wordCharacters),e.Pipeline.registerFunction(e.fr.trimmer,"trimmer-fr"),e.fr.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,s){return!(!z.eq_s(1,e)||(z.ket=z.cursor,!z.in_grouping(v,97,251)))&&(z.slice_from(r),z.cursor=s,!0)}function i(e,r,s){return!!z.eq_s(1,e)&&(z.ket=z.cursor,z.slice_from(r),z.cursor=s,!0)}function n(){for(;!z.in_grouping(v,97,251);){if(z.cursor>=z.limit)return!0;z.cursor++}for(;!z.out_grouping(v,97,251);){if(z.cursor>=z.limit)return!0;z.cursor++}return!1}function t(){return w<=z.cursor}function u(){return l<=z.cursor}function o(){return a<=z.cursor}function c(){if(!function(){var e,r;if(z.ket=z.cursor,e=z.find_among_b(d,43)){switch(z.bra=z.cursor,e){case 1:if(!o())return!1;z.slice_del();break;case 2:if(!o())return!1;z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"ic")&&(z.bra=z.cursor,o()?z.slice_del():z.slice_from("iqU"));break;case 3:if(!o())return!1;z.slice_from("log");break;case 4:if(!o())return!1;z.slice_from("u");break;case 5:if(!o())return!1;z.slice_from("ent");break;case 6:if(!t())return!1;if(z.slice_del(),z.ket=z.cursor,e=z.find_among_b(_,6))switch(z.bra=z.cursor,e){case 1:o()&&(z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,o()&&z.slice_del()));break;case 2:o()?z.slice_del():u()&&z.slice_from("eux");break;case 3:o()&&z.slice_del();break;case 4:t()&&z.slice_from("i")}break;case 7:if(!o())return!1;if(z.slice_del(),z.ket=z.cursor,e=z.find_among_b(b,3))switch(z.bra=z.cursor,e){case 1:o()?z.slice_del():z.slice_from("abl");break;case 2:o()?z.slice_del():z.slice_from("iqU");break;case 3:o()&&z.slice_del()}break;case 8:if(!o())return!1;if(z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,o()&&(z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"ic")))){z.bra=z.cursor,o()?z.slice_del():z.slice_from("iqU");break}break;case 9:z.slice_from("eau");break;case 10:if(!u())return!1;z.slice_from("al");break;case 11:if(o())z.slice_del();else{if(!u())return!1;z.slice_from("eux")}break;case 12:if(!u()||!z.out_grouping_b(v,97,251))return!1;z.slice_del();break;case 13:return t()&&z.slice_from("ant"),!1;case 14:return t()&&z.slice_from("ent"),!1;case 15:return r=z.limit-z.cursor,z.in_grouping_b(v,97,251)&&t()&&(z.cursor=z.limit-r,z.slice_del()),!1}return!0}return!1}()&&(z.cursor=z.limit,!function(){var e,r;if(z.cursor=w){if(s=z.limit_backward,z.limit_backward=w,z.ket=z.cursor,e=z.find_among_b(g,7))switch(z.bra=z.cursor,e){case 1:if(o()){if(i=z.limit-z.cursor,!z.eq_s_b(1,"s")&&(z.cursor=z.limit-i,!z.eq_s_b(1,"t")))break;z.slice_del()}break;case 2:z.slice_from("i");break;case 3:z.slice_del();break;case 4:z.eq_s_b(2,"gu")&&z.slice_del()}z.limit_backward=s}}();z.cursor=z.limit,z.ket=z.cursor,z.eq_s_b(1,"Y")?(z.bra=z.cursor,z.slice_from("i")):(z.cursor=z.limit,z.eq_s_b(1,"ç")&&(z.bra=z.cursor,z.slice_from("c")))}var a,l,w,f=[new r("col",-1,-1),new r("par",-1,-1),new r("tap",-1,-1)],m=[new r("",-1,4),new r("I",0,1),new r("U",0,2),new r("Y",0,3)],_=[new r("iqU",-1,3),new r("abl",-1,3),new r("Ièr",-1,4),new r("ièr",-1,4),new r("eus",-1,2),new r("iv",-1,1)],b=[new r("ic",-1,2),new r("abil",-1,1),new 
r("iv",-1,3)],d=[new r("iqUe",-1,1),new r("atrice",-1,2),new r("ance",-1,1),new r("ence",-1,5),new r("logie",-1,3),new r("able",-1,1),new r("isme",-1,1),new r("euse",-1,11),new r("iste",-1,1),new r("ive",-1,8),new r("if",-1,8),new r("usion",-1,4),new r("ation",-1,2),new r("ution",-1,4),new r("ateur",-1,2),new r("iqUes",-1,1),new r("atrices",-1,2),new r("ances",-1,1),new r("ences",-1,5),new r("logies",-1,3),new r("ables",-1,1),new r("ismes",-1,1),new r("euses",-1,11),new r("istes",-1,1),new r("ives",-1,8),new r("ifs",-1,8),new r("usions",-1,4),new r("ations",-1,2),new r("utions",-1,4),new r("ateurs",-1,2),new r("ments",-1,15),new r("ements",30,6),new r("issements",31,12),new r("ités",-1,7),new r("ment",-1,15),new r("ement",34,6),new r("issement",35,12),new r("amment",34,13),new r("emment",34,14),new r("aux",-1,10),new r("eaux",39,9),new r("eux",-1,1),new r("ité",-1,7)],k=[new r("ira",-1,1),new r("ie",-1,1),new r("isse",-1,1),new r("issante",-1,1),new r("i",-1,1),new r("irai",4,1),new r("ir",-1,1),new r("iras",-1,1),new r("ies",-1,1),new r("îmes",-1,1),new r("isses",-1,1),new r("issantes",-1,1),new r("îtes",-1,1),new r("is",-1,1),new r("irais",13,1),new r("issais",13,1),new r("irions",-1,1),new r("issions",-1,1),new r("irons",-1,1),new r("issons",-1,1),new r("issants",-1,1),new r("it",-1,1),new r("irait",21,1),new r("issait",21,1),new r("issant",-1,1),new r("iraIent",-1,1),new r("issaIent",-1,1),new r("irent",-1,1),new r("issent",-1,1),new r("iront",-1,1),new r("ît",-1,1),new r("iriez",-1,1),new r("issiez",-1,1),new r("irez",-1,1),new r("issez",-1,1)],p=[new r("a",-1,3),new r("era",0,2),new r("asse",-1,3),new r("ante",-1,3),new r("ée",-1,2),new r("ai",-1,3),new r("erai",5,2),new r("er",-1,2),new r("as",-1,3),new r("eras",8,2),new r("âmes",-1,3),new r("asses",-1,3),new r("antes",-1,3),new r("âtes",-1,3),new r("ées",-1,2),new r("ais",-1,3),new r("erais",15,2),new r("ions",-1,1),new r("erions",17,2),new r("assions",17,3),new r("erons",-1,2),new r("ants",-1,3),new r("és",-1,2),new r("ait",-1,3),new r("erait",23,2),new r("ant",-1,3),new r("aIent",-1,3),new r("eraIent",26,2),new r("èrent",-1,2),new r("assent",-1,3),new r("eront",-1,2),new r("ât",-1,3),new r("ez",-1,2),new r("iez",32,2),new r("eriez",33,2),new r("assiez",33,3),new r("erez",32,2),new r("é",-1,2)],g=[new r("e",-1,3),new r("Ière",0,2),new r("ière",0,2),new r("ion",-1,1),new r("Ier",-1,2),new r("ier",-1,2),new r("ë",-1,4)],q=[new r("ell",-1,-1),new r("eill",-1,-1),new r("enn",-1,-1),new r("onn",-1,-1),new r("ett",-1,-1)],v=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,128,130,103,8,5],h=[1,65,20,0,0,0,0,0,0,0,0,0,0,0,0,0,128],z=new s;this.setCurrent=function(e){z.setCurrent(e)},this.getCurrent=function(){return z.getCurrent()},this.stem=function(){var r=z.cursor;return function(){for(var r,s;;){if(r=z.cursor,z.in_grouping(v,97,251)){if(z.bra=z.cursor,s=z.cursor,e("u","U",r))continue;if(z.cursor=s,e("i","I",r))continue;if(z.cursor=s,i("y","Y",r))continue}if(z.cursor=r,z.bra=r,!e("y","Y",r)){if(z.cursor=r,z.eq_s(1,"q")&&(z.bra=z.cursor,i("u","U",r)))continue;if(z.cursor=r,r>=z.limit)return;z.cursor++}}}(),z.cursor=r,function(){var e=z.cursor;if(w=z.limit,l=w,a=w,z.in_grouping(v,97,251)&&z.in_grouping(v,97,251)&&z.cursor=z.limit){z.cursor=w;break}z.cursor++}while(!z.in_grouping(v,97,251))}w=z.cursor,z.cursor=e,n()||(l=z.cursor,n()||(a=z.cursor))}(),z.limit_backward=r,z.cursor=z.limit,c(),z.cursor=z.limit,function(){var 
e=z.limit-z.cursor;z.find_among_b(q,5)&&(z.cursor=z.limit-e,z.ket=z.cursor,z.cursor>z.limit_backward&&(z.cursor--,z.bra=z.cursor,z.slice_del()))}(),z.cursor=z.limit,function(){for(var e,r=1;z.out_grouping_b(v,97,251);)r--;if(r<=0){if(z.ket=z.cursor,e=z.limit-z.cursor,!z.eq_s_b(1,"é")&&(z.cursor=z.limit-e,!z.eq_s_b(1,"è")))return;z.bra=z.cursor,z.slice_from("e")}}(),z.cursor=z.limit_backward,function(){for(var e,r;r=z.cursor,z.bra=r,e=z.find_among(m,4);)switch(z.ket=z.cursor,e){case 1:z.slice_from("i");break;case 2:z.slice_from("u");break;case 3:z.slice_from("y");break;case 4:if(z.cursor>=z.limit)return;z.cursor++}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.fr.stemmer,"stemmer-fr"),e.fr.stopWordFilter=e.generateStopWordFilter("ai aie aient aies ait as au aura aurai auraient aurais aurait auras aurez auriez aurions aurons auront aux avaient avais avait avec avez aviez avions avons ayant ayez ayons c ce ceci celà ces cet cette d dans de des du elle en es est et eu eue eues eurent eus eusse eussent eusses eussiez eussions eut eux eûmes eût eûtes furent fus fusse fussent fusses fussiez fussions fut fûmes fût fûtes ici il ils j je l la le les leur leurs lui m ma mais me mes moi mon même n ne nos notre nous on ont ou par pas pour qu que quel quelle quelles quels qui s sa sans se sera serai seraient serais serait seras serez seriez serions serons seront ses soi soient sois soit sommes son sont soyez soyons suis sur t ta te tes toi ton tu un une vos votre vous y à étaient étais était étant étiez étions été étée étées étés êtes".split(" ")),e.Pipeline.registerFunction(e.fr.stopWordFilter,"stopWordFilter-fr")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.hu.js b/docs/assets/javascripts/lunr/lunr.hu.js new file mode 100644 index 000000000..0f56d6983 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.hu.js @@ -0,0 +1 @@ +!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.hu=function(){this.pipeline.reset(),this.pipeline.add(e.hu.trimmer,e.hu.stopWordFilter,e.hu.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.hu.stemmer))},e.hu.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.hu.trimmer=e.trimmerSupport.generateTrimmer(e.hu.wordCharacters),e.Pipeline.registerFunction(e.hu.trimmer,"trimmer-hu"),e.hu.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,i=new function(){function e(){return s<=_.cursor}function i(){var e=_.limit-_.cursor;return!!_.find_among_b(w,23)&&(_.cursor=_.limit-e,!0)}function a(){if(_.cursor>_.limit_backward){_.cursor--,_.ket=_.cursor;var e=_.cursor-1;_.limit_backward<=e&&e<=_.limit&&(_.cursor=e,_.bra=e,_.slice_del())}}function t(){_.ket=_.cursor,_.find_among_b(u,44)&&(_.bra=_.cursor,e()&&(_.slice_del(),function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(o,2))&&(_.bra=_.cursor,e()))switch(n){case 1:_.slice_from("a");break;case 2:_.slice_from("e")}}()))}var s,c=[new n("cs",-1,-1),new n("dzs",-1,-1),new n("gy",-1,-1),new n("ly",-1,-1),new n("ny",-1,-1),new n("sz",-1,-1),new n("ty",-1,-1),new n("zs",-1,-1)],o=[new n("á",-1,1),new n("é",-1,2)],w=[new n("bb",-1,-1),new n("cc",-1,-1),new n("dd",-1,-1),new n("ff",-1,-1),new n("gg",-1,-1),new n("jj",-1,-1),new n("kk",-1,-1),new n("ll",-1,-1),new n("mm",-1,-1),new n("nn",-1,-1),new n("pp",-1,-1),new n("rr",-1,-1),new n("ccs",-1,-1),new n("ss",-1,-1),new n("zzs",-1,-1),new n("tt",-1,-1),new n("vv",-1,-1),new n("ggy",-1,-1),new n("lly",-1,-1),new n("nny",-1,-1),new n("tty",-1,-1),new n("ssz",-1,-1),new n("zz",-1,-1)],l=[new n("al",-1,1),new n("el",-1,2)],u=[new n("ba",-1,-1),new n("ra",-1,-1),new n("be",-1,-1),new n("re",-1,-1),new n("ig",-1,-1),new n("nak",-1,-1),new n("nek",-1,-1),new n("val",-1,-1),new n("vel",-1,-1),new n("ul",-1,-1),new n("nál",-1,-1),new n("nél",-1,-1),new n("ból",-1,-1),new n("ról",-1,-1),new n("tól",-1,-1),new n("bõl",-1,-1),new n("rõl",-1,-1),new n("tõl",-1,-1),new n("ül",-1,-1),new n("n",-1,-1),new n("an",19,-1),new n("ban",20,-1),new n("en",19,-1),new n("ben",22,-1),new n("képpen",22,-1),new n("on",19,-1),new n("ön",19,-1),new n("képp",-1,-1),new n("kor",-1,-1),new n("t",-1,-1),new n("at",29,-1),new n("et",29,-1),new n("ként",29,-1),new n("anként",32,-1),new n("enként",32,-1),new n("onként",32,-1),new n("ot",29,-1),new n("ért",29,-1),new n("öt",29,-1),new n("hez",-1,-1),new n("hoz",-1,-1),new n("höz",-1,-1),new n("vá",-1,-1),new n("vé",-1,-1)],m=[new n("án",-1,2),new n("én",-1,1),new n("ánként",-1,3)],k=[new n("stul",-1,2),new n("astul",0,1),new n("ástul",0,3),new n("stül",-1,2),new n("estül",3,1),new n("éstül",3,4)],f=[new n("á",-1,1),new n("é",-1,2)],b=[new n("k",-1,7),new n("ak",0,4),new n("ek",0,6),new n("ok",0,5),new n("ák",0,1),new n("ék",0,2),new n("ök",0,3)],d=[new n("éi",-1,7),new n("áéi",0,6),new n("ééi",0,5),new n("é",-1,9),new n("ké",3,4),new n("aké",4,1),new n("eké",4,1),new n("oké",4,1),new n("áké",4,3),new n("éké",4,2),new n("öké",4,1),new n("éé",3,8)],g=[new n("a",-1,18),new n("ja",0,17),new n("d",-1,16),new n("ad",2,13),new n("ed",2,13),new n("od",2,13),new n("ád",2,14),new n("éd",2,15),new n("öd",2,13),new n("e",-1,18),new n("je",9,17),new n("nk",-1,4),new n("unk",11,1),new n("ánk",11,2),new n("énk",11,3),new n("ünk",11,1),new n("uk",-1,8),new n("juk",16,7),new n("ájuk",17,5),new n("ük",-1,8),new n("jük",19,7),new n("éjük",20,6),new n("m",-1,12),new n("am",22,9),new 
n("em",22,9),new n("om",22,9),new n("ám",22,10),new n("ém",22,11),new n("o",-1,18),new n("á",-1,19),new n("é",-1,20)],h=[new n("id",-1,10),new n("aid",0,9),new n("jaid",1,6),new n("eid",0,9),new n("jeid",3,6),new n("áid",0,7),new n("éid",0,8),new n("i",-1,15),new n("ai",7,14),new n("jai",8,11),new n("ei",7,14),new n("jei",10,11),new n("ái",7,12),new n("éi",7,13),new n("itek",-1,24),new n("eitek",14,21),new n("jeitek",15,20),new n("éitek",14,23),new n("ik",-1,29),new n("aik",18,26),new n("jaik",19,25),new n("eik",18,26),new n("jeik",21,25),new n("áik",18,27),new n("éik",18,28),new n("ink",-1,20),new n("aink",25,17),new n("jaink",26,16),new n("eink",25,17),new n("jeink",28,16),new n("áink",25,18),new n("éink",25,19),new n("aitok",-1,21),new n("jaitok",32,20),new n("áitok",-1,22),new n("im",-1,5),new n("aim",35,4),new n("jaim",36,1),new n("eim",35,4),new n("jeim",38,1),new n("áim",35,2),new n("éim",35,3)],p=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,52,14],_=new r;this.setCurrent=function(e){_.setCurrent(e)},this.getCurrent=function(){return _.getCurrent()},this.stem=function(){var n=_.cursor;return function(){var e,n=_.cursor;if(s=_.limit,_.in_grouping(p,97,252))for(;;){if(e=_.cursor,_.out_grouping(p,97,252))return _.cursor=e,_.find_among(c,8)||(_.cursor=e,e<_.limit&&_.cursor++),void(s=_.cursor);if(_.cursor=e,e>=_.limit)return void(s=e);_.cursor++}if(_.cursor=n,_.out_grouping(p,97,252)){for(;!_.in_grouping(p,97,252);){if(_.cursor>=_.limit)return;_.cursor++}s=_.cursor}}(),_.limit_backward=n,_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(l,2))&&(_.bra=_.cursor,e())){if((1==n||2==n)&&!i())return;_.slice_del(),a()}}(),_.cursor=_.limit,t(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(m,3))&&(_.bra=_.cursor,e()))switch(n){case 1:_.slice_from("e");break;case 2:case 3:_.slice_from("a")}}(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(k,6))&&(_.bra=_.cursor,e()))switch(n){case 1:case 2:_.slice_del();break;case 3:_.slice_from("a");break;case 4:_.slice_from("e")}}(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(f,2))&&(_.bra=_.cursor,e())){if((1==n||2==n)&&!i())return;_.slice_del(),a()}}(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(d,12))&&(_.bra=_.cursor,e()))switch(n){case 1:case 4:case 7:case 9:_.slice_del();break;case 2:case 5:case 8:_.slice_from("e");break;case 3:case 6:_.slice_from("a")}}(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(g,31))&&(_.bra=_.cursor,e()))switch(n){case 1:case 4:case 7:case 8:case 9:case 12:case 13:case 16:case 17:case 18:_.slice_del();break;case 2:case 5:case 10:case 14:case 19:_.slice_from("a");break;case 3:case 6:case 11:case 15:case 20:_.slice_from("e")}}(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(h,42))&&(_.bra=_.cursor,e()))switch(n){case 1:case 4:case 5:case 6:case 9:case 10:case 11:case 14:case 15:case 16:case 17:case 20:case 21:case 24:case 25:case 26:case 29:_.slice_del();break;case 2:case 7:case 12:case 18:case 22:case 27:_.slice_from("a");break;case 3:case 8:case 13:case 19:case 23:case 28:_.slice_from("e")}}(),_.cursor=_.limit,function(){var n;if(_.ket=_.cursor,(n=_.find_among_b(b,7))&&(_.bra=_.cursor,e()))switch(n){case 1:_.slice_from("a");break;case 2:_.slice_from("e");break;case 3:case 4:case 5:case 6:case 7:_.slice_del()}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.hu.stemmer,"stemmer-hu"),e.hu.stopWordFilter=e.generateStopWordFilter("a abban ahhoz ahogy ahol aki akik akkor alatt amely amelyek amelyekben amelyeket amelyet amelynek ami amikor amit amolyan amíg annak arra arról az azok azon azonban azt aztán azután azzal azért be belül benne bár cikk cikkek cikkeket csak de e ebben eddig egy egyes egyetlen egyik egyre egyéb egész ehhez ekkor el ellen elsõ elég elõ elõször elõtt emilyen ennek erre ez ezek ezen ezt ezzel ezért fel felé hanem hiszen hogy hogyan igen ill ill. illetve ilyen ilyenkor ismét ison itt jobban jó jól kell kellett keressünk keresztül ki kívül között közül legalább legyen lehet lehetett lenne lenni lesz lett maga magát majd majd meg mellett mely melyek mert mi mikor milyen minden mindenki mindent mindig mint mintha mit mivel miért most már más másik még míg nagy nagyobb nagyon ne nekem neki nem nincs néha néhány nélkül olyan ott pedig persze rá s saját sem semmi sok sokat sokkal szemben szerint szinte számára talán tehát teljes tovább továbbá több ugyanis utolsó után utána vagy vagyis vagyok valaki valami valamint való van vannak vele vissza viszont volna volt voltak voltam voltunk által általában át én éppen és így õ õk õket össze úgy új újabb újra".split(" ")),e.Pipeline.registerFunction(e.hu.stopWordFilter,"stopWordFilter-hu")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.it.js b/docs/assets/javascripts/lunr/lunr.it.js new file mode 100644 index 000000000..3f9972051 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.it.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.it=function(){this.pipeline.reset(),this.pipeline.add(e.it.trimmer,e.it.stopWordFilter,e.it.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.it.stemmer))},e.it.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.it.trimmer=e.trimmerSupport.generateTrimmer(e.it.wordCharacters),e.Pipeline.registerFunction(e.it.trimmer,"trimmer-it"),e.it.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!z.eq_s(1,e)||(z.ket=z.cursor,!z.in_grouping(h,97,249)))&&(z.slice_from(r),z.cursor=n,!0)}function i(e){if(z.cursor=e,!z.in_grouping(h,97,249))return!1;for(;!z.out_grouping(h,97,249);){if(z.cursor>=z.limit)return!1;z.cursor++}return!0}function o(){var e,r=z.cursor;if(!function(){if(z.in_grouping(h,97,249)){var e=z.cursor;if(z.out_grouping(h,97,249)){for(;!z.in_grouping(h,97,249);){if(z.cursor>=z.limit)return i(e);z.cursor++}return!0}return i(e)}return!1}()){if(z.cursor=r,!z.out_grouping(h,97,249))return;if(e=z.cursor,z.out_grouping(h,97,249)){for(;!z.in_grouping(h,97,249);){if(z.cursor>=z.limit)return z.cursor=e,void(z.in_grouping(h,97,249)&&z.cursor=z.limit)return;z.cursor++}m=z.cursor}function t(){for(;!z.in_grouping(h,97,249);){if(z.cursor>=z.limit)return!1;z.cursor++}for(;!z.out_grouping(h,97,249);){if(z.cursor>=z.limit)return!1;z.cursor++}return!0}function s(){return m<=z.cursor}function a(){return w<=z.cursor}function u(){var e;if(z.ket=z.cursor,!(e=z.find_among_b(p,51)))return!1;switch(z.bra=z.cursor,e){case 1:if(!a())return!1;z.slice_del();break;case 2:if(!a())return!1;z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"ic")&&(z.bra=z.cursor,a()&&z.slice_del());break;case 3:if(!a())return!1;z.slice_from("log");break;case 4:if(!a())return!1;z.slice_from("u");break;case 5:if(!a())return!1;z.slice_from("ente");break;case 6:if(!s())return!1;z.slice_del();break;case 7:if(!(l<=z.cursor))return!1;z.slice_del(),z.ket=z.cursor,(e=z.find_among_b(_,4))&&(z.bra=z.cursor,a()&&(z.slice_del(),1==e&&(z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,a()&&z.slice_del()))));break;case 8:if(!a())return!1;z.slice_del(),z.ket=z.cursor,(e=z.find_among_b(g,3))&&(z.bra=z.cursor,1==e&&a()&&z.slice_del());break;case 9:if(!a())return!1;z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,a()&&(z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"ic")&&(z.bra=z.cursor,a()&&z.slice_del())))}return!0}function c(){!function(){var e=z.limit-z.cursor;z.ket=z.cursor,z.in_grouping_b(q,97,242)&&(z.bra=z.cursor,s()&&(z.slice_del(),z.ket=z.cursor,z.eq_s_b(1,"i")&&(z.bra=z.cursor,s())))?z.slice_del():z.cursor=z.limit-e}(),z.ket=z.cursor,z.eq_s_b(1,"h")&&(z.bra=z.cursor,z.in_grouping_b(C,99,103)&&s()&&z.slice_del())}var w,l,m,f=[new r("",-1,7),new r("qu",0,6),new r("á",0,1),new r("é",0,2),new r("í",0,3),new r("ó",0,4),new r("ú",0,5)],v=[new r("",-1,3),new r("I",0,1),new r("U",0,2)],b=[new r("la",-1,-1),new r("cela",0,-1),new r("gliela",0,-1),new r("mela",0,-1),new r("tela",0,-1),new r("vela",0,-1),new r("le",-1,-1),new r("cele",6,-1),new r("gliele",6,-1),new r("mele",6,-1),new r("tele",6,-1),new r("vele",6,-1),new r("ne",-1,-1),new r("cene",12,-1),new r("gliene",12,-1),new r("mene",12,-1),new r("sene",12,-1),new r("tene",12,-1),new r("vene",12,-1),new r("ci",-1,-1),new r("li",-1,-1),new r("celi",20,-1),new r("glieli",20,-1),new r("meli",20,-1),new r("teli",20,-1),new r("veli",20,-1),new r("gli",20,-1),new r("mi",-1,-1),new 
r("si",-1,-1),new r("ti",-1,-1),new r("vi",-1,-1),new r("lo",-1,-1),new r("celo",31,-1),new r("glielo",31,-1),new r("melo",31,-1),new r("telo",31,-1),new r("velo",31,-1)],d=[new r("ando",-1,1),new r("endo",-1,1),new r("ar",-1,2),new r("er",-1,2),new r("ir",-1,2)],_=[new r("ic",-1,-1),new r("abil",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],g=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],p=[new r("ica",-1,1),new r("logia",-1,3),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,9),new r("anza",-1,1),new r("enza",-1,5),new r("ice",-1,1),new r("atrice",7,1),new r("iche",-1,1),new r("logie",-1,3),new r("abile",-1,1),new r("ibile",-1,1),new r("usione",-1,4),new r("azione",-1,2),new r("uzione",-1,4),new r("atore",-1,2),new r("ose",-1,1),new r("ante",-1,1),new r("mente",-1,1),new r("amente",19,7),new r("iste",-1,1),new r("ive",-1,9),new r("anze",-1,1),new r("enze",-1,5),new r("ici",-1,1),new r("atrici",25,1),new r("ichi",-1,1),new r("abili",-1,1),new r("ibili",-1,1),new r("ismi",-1,1),new r("usioni",-1,4),new r("azioni",-1,2),new r("uzioni",-1,4),new r("atori",-1,2),new r("osi",-1,1),new r("anti",-1,1),new r("amenti",-1,6),new r("imenti",-1,6),new r("isti",-1,1),new r("ivi",-1,9),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,6),new r("imento",-1,6),new r("ivo",-1,9),new r("ità",-1,8),new r("istà",-1,1),new r("istè",-1,1),new r("istì",-1,1)],k=[new r("isca",-1,1),new r("enda",-1,1),new r("ata",-1,1),new r("ita",-1,1),new r("uta",-1,1),new r("ava",-1,1),new r("eva",-1,1),new r("iva",-1,1),new r("erebbe",-1,1),new r("irebbe",-1,1),new r("isce",-1,1),new r("ende",-1,1),new r("are",-1,1),new r("ere",-1,1),new r("ire",-1,1),new r("asse",-1,1),new r("ate",-1,1),new r("avate",16,1),new r("evate",16,1),new r("ivate",16,1),new r("ete",-1,1),new r("erete",20,1),new r("irete",20,1),new r("ite",-1,1),new r("ereste",-1,1),new r("ireste",-1,1),new r("ute",-1,1),new r("erai",-1,1),new r("irai",-1,1),new r("isci",-1,1),new r("endi",-1,1),new r("erei",-1,1),new r("irei",-1,1),new r("assi",-1,1),new r("ati",-1,1),new r("iti",-1,1),new r("eresti",-1,1),new r("iresti",-1,1),new r("uti",-1,1),new r("avi",-1,1),new r("evi",-1,1),new r("ivi",-1,1),new r("isco",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("Yamo",-1,1),new r("iamo",-1,1),new r("avamo",-1,1),new r("evamo",-1,1),new r("ivamo",-1,1),new r("eremo",-1,1),new r("iremo",-1,1),new r("assimo",-1,1),new r("ammo",-1,1),new r("emmo",-1,1),new r("eremmo",54,1),new r("iremmo",54,1),new r("immo",-1,1),new r("ano",-1,1),new r("iscano",58,1),new r("avano",58,1),new r("evano",58,1),new r("ivano",58,1),new r("eranno",-1,1),new r("iranno",-1,1),new r("ono",-1,1),new r("iscono",65,1),new r("arono",65,1),new r("erono",65,1),new r("irono",65,1),new r("erebbero",-1,1),new r("irebbero",-1,1),new r("assero",-1,1),new r("essero",-1,1),new r("issero",-1,1),new r("ato",-1,1),new r("ito",-1,1),new r("uto",-1,1),new r("avo",-1,1),new r("evo",-1,1),new r("ivo",-1,1),new r("ar",-1,1),new r("ir",-1,1),new r("erà",-1,1),new r("irà",-1,1),new r("erò",-1,1),new r("irò",-1,1)],h=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2,1],q=[17,65,0,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2],C=[17],z=new n;this.setCurrent=function(e){z.setCurrent(e)},this.getCurrent=function(){return z.getCurrent()},this.stem=function(){var r=z.cursor;return function(){for(var r,n,i,o,t=z.cursor;;){if(z.bra=z.cursor,r=z.find_among(f,7))switch(z.ket=z.cursor,r){case 1:z.slice_from("à");continue;case 2:z.slice_from("è");continue;case 3:z.slice_from("ì");continue;case 
4:z.slice_from("ò");continue;case 5:z.slice_from("ù");continue;case 6:z.slice_from("qU");continue;case 7:if(z.cursor>=z.limit)break;z.cursor++;continue}break}for(z.cursor=t;;)for(n=z.cursor;;){if(i=z.cursor,z.in_grouping(h,97,249)){if(z.bra=z.cursor,o=z.cursor,e("u","U",i))break;if(z.cursor=o,e("i","I",i))break}if(z.cursor=i,z.cursor>=z.limit)return void(z.cursor=n);z.cursor++}}(),z.cursor=r,function(){var e=z.cursor;m=z.limit,l=m,w=m,o(),z.cursor=e,t()&&(l=z.cursor,t()&&(w=z.cursor))}(),z.limit_backward=r,z.cursor=z.limit,function(){var e;if(z.ket=z.cursor,z.find_among_b(b,37)&&(z.bra=z.cursor,(e=z.find_among_b(d,5))&&s()))switch(e){case 1:z.slice_del();break;case 2:z.slice_from("e")}}(),z.cursor=z.limit,u()||(z.cursor=z.limit,function(){var e,r;z.cursor>=m&&(r=z.limit_backward,z.limit_backward=m,z.ket=z.cursor,(e=z.find_among_b(k,87))&&(z.bra=z.cursor,1==e&&z.slice_del()),z.limit_backward=r)}()),z.cursor=z.limit,c(),z.cursor=z.limit_backward,function(){for(var e;z.bra=z.cursor,e=z.find_among(v,3);)switch(z.ket=z.cursor,e){case 1:z.slice_from("i");break;case 2:z.slice_from("u");break;case 3:if(z.cursor>=z.limit)return;z.cursor++}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.it.stemmer,"stemmer-it"),e.it.stopWordFilter=e.generateStopWordFilter("a abbia abbiamo abbiano abbiate ad agl agli ai al all alla alle allo anche avemmo avendo avesse avessero avessi avessimo aveste avesti avete aveva avevamo avevano avevate avevi avevo avrai avranno avrebbe avrebbero avrei avremmo avremo avreste avresti avrete avrà avrò avuta avute avuti avuto c che chi ci coi col come con contro cui da dagl dagli dai dal dall dalla dalle dallo degl degli dei del dell della delle dello di dov dove e ebbe ebbero ebbi ed era erano eravamo eravate eri ero essendo faccia facciamo facciano facciate faccio facemmo facendo facesse facessero facessi facessimo faceste facesti faceva facevamo facevano facevate facevi facevo fai fanno farai faranno farebbe farebbero farei faremmo faremo fareste faresti farete farà farò fece fecero feci fosse fossero fossi fossimo foste fosti fu fui fummo furono gli ha hai hanno ho i il in io l la le lei li lo loro lui ma mi mia mie miei mio ne negl negli nei nel nell nella nelle nello noi non nostra nostre nostri nostro o per perché più quale quanta quante quanti quanto quella quelle quelli quello questa queste questi questo sarai saranno sarebbe sarebbero sarei saremmo saremo sareste saresti sarete sarà sarò se sei si sia siamo siano siate siete sono sta stai stando stanno starai staranno starebbe starebbero starei staremmo staremo stareste staresti starete starà starò stava stavamo stavano stavate stavi stavo stemmo stesse stessero stessi stessimo steste stesti stette stettero stetti stia stiamo stiano stiate sto su sua sue sugl sugli sui sul sull sulla sulle sullo suo suoi ti tra tu tua tue tuo tuoi tutti tutto un una uno vi voi vostra vostre vostri vostro è".split(" ")),e.Pipeline.registerFunction(e.it.stopWordFilter,"stopWordFilter-it")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.jp.js b/docs/assets/javascripts/lunr/lunr.jp.js new file mode 100644 index 000000000..8e49d5082 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.jp.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return 
function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.jp=function(){this.pipeline.reset(),this.pipeline.add(e.jp.stopWordFilter,e.jp.stemmer),r?this.tokenizer=e.jp.tokenizer:(e.tokenizer&&(e.tokenizer=e.jp.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.jp.tokenizer))};var t=new e.TinySegmenter;e.jp.tokenizer=function(n){if(!arguments.length||null==n||void 0==n)return[];if(Array.isArray(n))return n.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(var i=n.toString().toLowerCase().replace(/^\s+/,""),o=i.length-1;o>=0;o--)if(/\S/.test(i.charAt(o))){i=i.substring(0,o+1);break}return t.segment(i).filter(function(e){return!!e}).map(function(t){return r?new e.Token(t):t})},e.jp.stemmer=function(e){return e},e.Pipeline.registerFunction(e.jp.stemmer,"stemmer-jp"),e.jp.wordCharacters="一二三四五六七八九十百千万億兆一-龠々〆ヵヶぁ-んァ-ヴーア-ン゙a-zA-Za-zA-Z0-90-9",e.jp.stopWordFilter=function(t){if(-1===e.jp.stopWordFilter.stopWords.indexOf(r?t.toString():t))return t},e.jp.stopWordFilter=e.generateStopWordFilter("これ それ あれ この その あの ここ そこ あそこ こちら どこ だれ なに なん 何 私 貴方 貴方方 我々 私達 あの人 あのかた 彼女 彼 です あります おります います は が の に を で え から まで より も どの と し それで しかし".split(" ")),e.Pipeline.registerFunction(e.jp.stopWordFilter,"stopWordFilter-jp")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.multi.js b/docs/assets/javascripts/lunr/lunr.multi.js new file mode 100644 index 000000000..d3dbc860c --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.multi.js @@ -0,0 +1 @@ +!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){e.multiLanguage=function(){for(var i=Array.prototype.slice.call(arguments),t=i.join("-"),r="",n=[],s=[],p=0;p=u.limit)return;u.cursor=r+1}for(;!u.out_grouping(a,97,248);){if(u.cursor>=u.limit)return;u.cursor++}(i=u.cursor)=i&&(r=u.limit_backward,u.limit_backward=i,u.ket=u.cursor,e=u.find_among_b(t,29),u.limit_backward=r,e))switch(u.bra=u.cursor,e){case 1:u.slice_del();break;case 2:n=u.limit-u.cursor,u.in_grouping_b(m,98,122)?u.slice_del():(u.cursor=u.limit-n,u.eq_s_b(1,"k")&&u.out_grouping_b(a,97,248)&&u.slice_del());break;case 3:u.slice_from("er")}}(),u.cursor=u.limit,function(){var e,r=u.limit-u.cursor;u.cursor>=i&&(e=u.limit_backward,u.limit_backward=i,u.ket=u.cursor,u.find_among_b(o,2)?(u.bra=u.cursor,u.limit_backward=e,u.cursor=u.limit-r,u.cursor>u.limit_backward&&(u.cursor--,u.bra=u.cursor,u.slice_del())):u.limit_backward=e)}(),u.cursor=u.limit,function(){var e,r;u.cursor>=i&&(r=u.limit_backward,u.limit_backward=i,u.ket=u.cursor,(e=u.find_among_b(s,11))?(u.bra=u.cursor,u.limit_backward=r,1==e&&u.slice_del()):u.limit_backward=r)}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde han hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken 
hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.pt.js b/docs/assets/javascripts/lunr/lunr.pt.js new file mode 100644 index 000000000..006ffc93d --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.pt.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.pt=function(){this.pipeline.reset(),this.pipeline.add(e.pt.trimmer,e.pt.stopWordFilter,e.pt.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.pt.stemmer))},e.pt.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.pt.trimmer=e.trimmerSupport.generateTrimmer(e.pt.wordCharacters),e.Pipeline.registerFunction(e.pt.trimmer,"trimmer-pt"),e.pt.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,n=new function(){function e(){if(j.out_grouping(q,97,250)){for(;!j.in_grouping(q,97,250);){if(j.cursor>=j.limit)return!0;j.cursor++}return!1}return!0}function n(){var r,s,n=j.cursor;if(j.in_grouping(q,97,250))if(r=j.cursor,e()){if(j.cursor=r,function(){if(j.in_grouping(q,97,250))for(;!j.out_grouping(q,97,250);){if(j.cursor>=j.limit)return!1;j.cursor++}return l=j.cursor,!0}())return}else l=j.cursor;if(j.cursor=n,j.out_grouping(q,97,250)){if(s=j.cursor,e()){if(j.cursor=s,!j.in_grouping(q,97,250)||j.cursor>=j.limit)return;j.cursor++}l=j.cursor}}function i(){for(;!j.in_grouping(q,97,250);){if(j.cursor>=j.limit)return!1;j.cursor++}for(;!j.out_grouping(q,97,250);){if(j.cursor>=j.limit)return!1;j.cursor++}return!0}function o(){return l<=j.cursor}function a(){return m<=j.cursor}function t(){var e;if(j.ket=j.cursor,!(e=j.find_among_b(h,45)))return!1;switch(j.bra=j.cursor,e){case 1:if(!a())return!1;j.slice_del();break;case 2:if(!a())return!1;j.slice_from("log");break;case 3:if(!a())return!1;j.slice_from("u");break;case 4:if(!a())return!1;j.slice_from("ente");break;case 5:if(!(c<=j.cursor))return!1;j.slice_del(),j.ket=j.cursor,(e=j.find_among_b(v,4))&&(j.bra=j.cursor,a()&&(j.slice_del(),1==e&&(j.ket=j.cursor,j.eq_s_b(2,"at")&&(j.bra=j.cursor,a()&&j.slice_del()))));break;case 6:if(!a())return!1;j.slice_del(),j.ket=j.cursor,(e=j.find_among_b(p,3))&&(j.bra=j.cursor,1==e&&a()&&j.slice_del());break;case 7:if(!a())return!1;j.slice_del(),j.ket=j.cursor,(e=j.find_among_b(_,3))&&(j.bra=j.cursor,1==e&&a()&&j.slice_del());break;case 8:if(!a())return!1;j.slice_del(),j.ket=j.cursor,j.eq_s_b(2,"at")&&(j.bra=j.cursor,a()&&j.slice_del());break;case 9:if(!o()||!j.eq_s_b(1,"e"))return!1;j.slice_from("ir")}return!0}function u(e,r){if(j.eq_s_b(1,e)){j.bra=j.cursor;var 
s=j.limit-j.cursor;if(j.eq_s_b(1,r))return j.cursor=j.limit-s,o()&&j.slice_del(),!1}return!0}function w(){if(!t()&&(j.cursor=j.limit,!function(){var e,r;if(j.cursor>=l){if(r=j.limit_backward,j.limit_backward=l,j.ket=j.cursor,e=j.find_among_b(b,120))return j.bra=j.cursor,1==e&&j.slice_del(),j.limit_backward=r,!0;j.limit_backward=r}return!1}()))return j.cursor=j.limit,void function(){var e;j.ket=j.cursor,(e=j.find_among_b(g,7))&&(j.bra=j.cursor,1==e&&o()&&j.slice_del())}();j.cursor=j.limit,j.ket=j.cursor,j.eq_s_b(1,"i")&&(j.bra=j.cursor,j.eq_s_b(1,"c")&&(j.cursor=j.limit,o()&&j.slice_del()))}var m,c,l,f=[new r("",-1,3),new r("ã",0,1),new r("õ",0,2)],d=[new r("",-1,3),new r("a~",0,1),new r("o~",0,2)],v=[new r("ic",-1,-1),new r("ad",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],p=[new r("ante",-1,1),new r("avel",-1,1),new r("ível",-1,1)],_=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],h=[new r("ica",-1,1),new r("ância",-1,1),new r("ência",-1,4),new r("ira",-1,9),new r("adora",-1,1),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,8),new r("eza",-1,1),new r("logía",-1,2),new r("idade",-1,7),new r("ante",-1,1),new r("mente",-1,6),new r("amente",12,5),new r("ável",-1,1),new r("ível",-1,1),new r("ución",-1,3),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,1),new r("imento",-1,1),new r("ivo",-1,8),new r("aça~o",-1,1),new r("ador",-1,1),new r("icas",-1,1),new r("ências",-1,4),new r("iras",-1,9),new r("adoras",-1,1),new r("osas",-1,1),new r("istas",-1,1),new r("ivas",-1,8),new r("ezas",-1,1),new r("logías",-1,2),new r("idades",-1,7),new r("uciones",-1,3),new r("adores",-1,1),new r("antes",-1,1),new r("aço~es",-1,1),new r("icos",-1,1),new r("ismos",-1,1),new r("osos",-1,1),new r("amentos",-1,1),new r("imentos",-1,1),new r("ivos",-1,8)],b=[new r("ada",-1,1),new r("ida",-1,1),new r("ia",-1,1),new r("aria",2,1),new r("eria",2,1),new r("iria",2,1),new r("ara",-1,1),new r("era",-1,1),new r("ira",-1,1),new r("ava",-1,1),new r("asse",-1,1),new r("esse",-1,1),new r("isse",-1,1),new r("aste",-1,1),new r("este",-1,1),new r("iste",-1,1),new r("ei",-1,1),new r("arei",16,1),new r("erei",16,1),new r("irei",16,1),new r("am",-1,1),new r("iam",20,1),new r("ariam",21,1),new r("eriam",21,1),new r("iriam",21,1),new r("aram",20,1),new r("eram",20,1),new r("iram",20,1),new r("avam",20,1),new r("em",-1,1),new r("arem",29,1),new r("erem",29,1),new r("irem",29,1),new r("assem",29,1),new r("essem",29,1),new r("issem",29,1),new r("ado",-1,1),new r("ido",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("indo",-1,1),new r("ara~o",-1,1),new r("era~o",-1,1),new r("ira~o",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("ir",-1,1),new r("as",-1,1),new r("adas",47,1),new r("idas",47,1),new r("ias",47,1),new r("arias",50,1),new r("erias",50,1),new r("irias",50,1),new r("aras",47,1),new r("eras",47,1),new r("iras",47,1),new r("avas",47,1),new r("es",-1,1),new r("ardes",58,1),new r("erdes",58,1),new r("irdes",58,1),new r("ares",58,1),new r("eres",58,1),new r("ires",58,1),new r("asses",58,1),new r("esses",58,1),new r("isses",58,1),new r("astes",58,1),new r("estes",58,1),new r("istes",58,1),new r("is",-1,1),new r("ais",71,1),new r("eis",71,1),new r("areis",73,1),new r("ereis",73,1),new r("ireis",73,1),new r("áreis",73,1),new r("éreis",73,1),new r("íreis",73,1),new r("ásseis",73,1),new r("ésseis",73,1),new r("ísseis",73,1),new r("áveis",73,1),new r("íeis",73,1),new r("aríeis",84,1),new r("eríeis",84,1),new r("iríeis",84,1),new r("ados",-1,1),new r("idos",-1,1),new r("amos",-1,1),new r("áramos",90,1),new 
r("éramos",90,1),new r("íramos",90,1),new r("ávamos",90,1),new r("íamos",90,1),new r("aríamos",95,1),new r("eríamos",95,1),new r("iríamos",95,1),new r("emos",-1,1),new r("aremos",99,1),new r("eremos",99,1),new r("iremos",99,1),new r("ássemos",99,1),new r("êssemos",99,1),new r("íssemos",99,1),new r("imos",-1,1),new r("armos",-1,1),new r("ermos",-1,1),new r("irmos",-1,1),new r("ámos",-1,1),new r("arás",-1,1),new r("erás",-1,1),new r("irás",-1,1),new r("eu",-1,1),new r("iu",-1,1),new r("ou",-1,1),new r("ará",-1,1),new r("erá",-1,1),new r("irá",-1,1)],g=[new r("a",-1,1),new r("i",-1,1),new r("o",-1,1),new r("os",-1,1),new r("á",-1,1),new r("í",-1,1),new r("ó",-1,1)],k=[new r("e",-1,1),new r("ç",-1,2),new r("é",-1,1),new r("ê",-1,1)],q=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,3,19,12,2],j=new s;this.setCurrent=function(e){j.setCurrent(e)},this.getCurrent=function(){return j.getCurrent()},this.stem=function(){var e=j.cursor;return function(){for(var e;;){if(j.bra=j.cursor,e=j.find_among(f,3))switch(j.ket=j.cursor,e){case 1:j.slice_from("a~");continue;case 2:j.slice_from("o~");continue;case 3:if(j.cursor>=j.limit)break;j.cursor++;continue}break}}(),j.cursor=e,function(){var e=j.cursor;l=j.limit,c=l,m=l,n(),j.cursor=e,i()&&(c=j.cursor,i()&&(m=j.cursor))}(),j.limit_backward=e,j.cursor=j.limit,w(),j.cursor=j.limit,function(){var e;if(j.ket=j.cursor,e=j.find_among_b(k,4))switch(j.bra=j.cursor,e){case 1:o()&&(j.slice_del(),j.ket=j.cursor,j.limit,j.cursor,u("u","g")&&u("i","c"));break;case 2:j.slice_from("c")}}(),j.cursor=j.limit_backward,function(){for(var e;;){if(j.bra=j.cursor,e=j.find_among(d,3))switch(j.ket=j.cursor,e){case 1:j.slice_from("ã");continue;case 2:j.slice_from("õ");continue;case 3:if(j.cursor>=j.limit)break;j.cursor++;continue}break}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.pt.stemmer,"stemmer-pt"),e.pt.stopWordFilter=e.generateStopWordFilter("a ao aos aquela aquelas aquele aqueles aquilo as até com como da das de dela delas dele deles depois do dos e ela elas ele eles em entre era eram essa essas esse esses esta estamos estas estava estavam este esteja estejam estejamos estes esteve estive estivemos estiver estivera estiveram estiverem estivermos estivesse estivessem estivéramos estivéssemos estou está estávamos estão eu foi fomos for fora foram forem formos fosse fossem fui fôramos fôssemos haja hajam hajamos havemos hei houve houvemos houver houvera houveram houverei houverem houveremos houveria houveriam houvermos houverá houverão houveríamos houvesse houvessem houvéramos houvéssemos há hão isso isto já lhe lhes mais mas me mesmo meu meus minha minhas muito na nas nem no nos nossa nossas nosso nossos num numa não nós o os ou para pela pelas pelo pelos por qual quando que quem se seja sejam sejamos sem serei seremos seria seriam será serão seríamos seu seus somos sou sua suas são só também te tem temos tenha tenham tenhamos tenho terei teremos teria teriam terá terão teríamos teu teus teve tinha tinham tive tivemos tiver tivera tiveram tiverem tivermos tivesse tivessem tivéramos tivéssemos tu tua tuas tém tínhamos um uma você vocês vos à às éramos".split(" ")),e.Pipeline.registerFunction(e.pt.stopWordFilter,"stopWordFilter-pt")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.ro.js b/docs/assets/javascripts/lunr/lunr.ro.js new file mode 100644 index 000000000..9b5612891 --- /dev/null +++ 
b/docs/assets/javascripts/lunr/lunr.ro.js @@ -0,0 +1 @@ +!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ro=function(){this.pipeline.reset(),this.pipeline.add(e.ro.trimmer,e.ro.stopWordFilter,e.ro.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ro.stemmer))},e.ro.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.ro.trimmer=e.trimmerSupport.generateTrimmer(e.ro.wordCharacters),e.Pipeline.registerFunction(e.ro.trimmer,"trimmer-ro"),e.ro.stemmer=function(){var i=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(e,i){h.eq_s(1,e)&&(h.ket=h.cursor,h.in_grouping(k,97,259)&&h.slice_from(i))}function n(){if(h.out_grouping(k,97,259)){for(;!h.in_grouping(k,97,259);){if(h.cursor>=h.limit)return!0;h.cursor++}return!1}return!0}function t(){var e,i,r=h.cursor;if(h.in_grouping(k,97,259)){if(e=h.cursor,!n())return void(f=h.cursor);if(h.cursor=e,!function(){if(h.in_grouping(k,97,259))for(;!h.out_grouping(k,97,259);){if(h.cursor>=h.limit)return!0;h.cursor++}return!1}())return void(f=h.cursor)}h.cursor=r,h.out_grouping(k,97,259)&&(i=h.cursor,n()&&(h.cursor=i,h.in_grouping(k,97,259)&&h.cursor=h.limit)return!1;h.cursor++}for(;!h.out_grouping(k,97,259);){if(h.cursor>=h.limit)return!1;h.cursor++}return!0}function o(){return l<=h.cursor}function s(){var e,i=h.limit-h.cursor;if(h.ket=h.cursor,(e=h.find_among_b(b,46))&&(h.bra=h.cursor,o())){switch(e){case 1:h.slice_from("abil");break;case 2:h.slice_from("ibil");break;case 3:h.slice_from("iv");break;case 4:h.slice_from("ic");break;case 5:h.slice_from("at");break;case 6:h.slice_from("it")}return w=!0,h.cursor=h.limit-i,!0}return!1}function c(){var e,i;for(w=!1;;)if(i=h.limit-h.cursor,!s()){h.cursor=h.limit-i;break}if(h.ket=h.cursor,(e=h.find_among_b(v,62))&&(h.bra=h.cursor,m<=h.cursor)){switch(e){case 1:h.slice_del();break;case 2:h.eq_s_b(1,"ţ")&&(h.bra=h.cursor,h.slice_from("t"));break;case 3:h.slice_from("ist")}w=!0}}function u(){var e;h.ket=h.cursor,(e=h.find_among_b(g,5))&&(h.bra=h.cursor,f<=h.cursor&&1==e&&h.slice_del())}var w,m,l,f,p=[new i("",-1,3),new i("I",0,1),new i("U",0,2)],d=[new i("ea",-1,3),new i("aţia",-1,7),new i("aua",-1,2),new i("iua",-1,4),new i("aţie",-1,7),new i("ele",-1,3),new i("ile",-1,5),new i("iile",6,4),new i("iei",-1,4),new i("atei",-1,6),new i("ii",-1,4),new i("ului",-1,1),new i("ul",-1,1),new i("elor",-1,3),new i("ilor",-1,4),new i("iilor",14,4)],b=[new i("icala",-1,4),new i("iciva",-1,4),new i("ativa",-1,5),new i("itiva",-1,6),new i("icale",-1,4),new i("aţiune",-1,5),new i("iţiune",-1,6),new i("atoare",-1,5),new i("itoare",-1,6),new i("ătoare",-1,5),new i("icitate",-1,4),new i("abilitate",-1,1),new i("ibilitate",-1,2),new i("ivitate",-1,3),new i("icive",-1,4),new i("ative",-1,5),new i("itive",-1,6),new i("icali",-1,4),new i("atori",-1,5),new i("icatori",18,4),new i("itori",-1,6),new i("ători",-1,5),new i("icitati",-1,4),new i("abilitati",-1,1),new i("ivitati",-1,3),new i("icivi",-1,4),new i("ativi",-1,5),new i("itivi",-1,6),new i("icităi",-1,4),new i("abilităi",-1,1),new i("ivităi",-1,3),new i("icităţi",-1,4),new i("abilităţi",-1,1),new 
i("ivităţi",-1,3),new i("ical",-1,4),new i("ator",-1,5),new i("icator",35,4),new i("itor",-1,6),new i("ător",-1,5),new i("iciv",-1,4),new i("ativ",-1,5),new i("itiv",-1,6),new i("icală",-1,4),new i("icivă",-1,4),new i("ativă",-1,5),new i("itivă",-1,6)],v=[new i("ica",-1,1),new i("abila",-1,1),new i("ibila",-1,1),new i("oasa",-1,1),new i("ata",-1,1),new i("ita",-1,1),new i("anta",-1,1),new i("ista",-1,3),new i("uta",-1,1),new i("iva",-1,1),new i("ic",-1,1),new i("ice",-1,1),new i("abile",-1,1),new i("ibile",-1,1),new i("isme",-1,3),new i("iune",-1,2),new i("oase",-1,1),new i("ate",-1,1),new i("itate",17,1),new i("ite",-1,1),new i("ante",-1,1),new i("iste",-1,3),new i("ute",-1,1),new i("ive",-1,1),new i("ici",-1,1),new i("abili",-1,1),new i("ibili",-1,1),new i("iuni",-1,2),new i("atori",-1,1),new i("osi",-1,1),new i("ati",-1,1),new i("itati",30,1),new i("iti",-1,1),new i("anti",-1,1),new i("isti",-1,3),new i("uti",-1,1),new i("işti",-1,3),new i("ivi",-1,1),new i("ităi",-1,1),new i("oşi",-1,1),new i("ităţi",-1,1),new i("abil",-1,1),new i("ibil",-1,1),new i("ism",-1,3),new i("ator",-1,1),new i("os",-1,1),new i("at",-1,1),new i("it",-1,1),new i("ant",-1,1),new i("ist",-1,3),new i("ut",-1,1),new i("iv",-1,1),new i("ică",-1,1),new i("abilă",-1,1),new i("ibilă",-1,1),new i("oasă",-1,1),new i("ată",-1,1),new i("ită",-1,1),new i("antă",-1,1),new i("istă",-1,3),new i("ută",-1,1),new i("ivă",-1,1)],_=[new i("ea",-1,1),new i("ia",-1,1),new i("esc",-1,1),new i("ăsc",-1,1),new i("ind",-1,1),new i("ând",-1,1),new i("are",-1,1),new i("ere",-1,1),new i("ire",-1,1),new i("âre",-1,1),new i("se",-1,2),new i("ase",10,1),new i("sese",10,2),new i("ise",10,1),new i("use",10,1),new i("âse",10,1),new i("eşte",-1,1),new i("ăşte",-1,1),new i("eze",-1,1),new i("ai",-1,1),new i("eai",19,1),new i("iai",19,1),new i("sei",-1,2),new i("eşti",-1,1),new i("ăşti",-1,1),new i("ui",-1,1),new i("ezi",-1,1),new i("âi",-1,1),new i("aşi",-1,1),new i("seşi",-1,2),new i("aseşi",29,1),new i("seseşi",29,2),new i("iseşi",29,1),new i("useşi",29,1),new i("âseşi",29,1),new i("işi",-1,1),new i("uşi",-1,1),new i("âşi",-1,1),new i("aţi",-1,2),new i("eaţi",38,1),new i("iaţi",38,1),new i("eţi",-1,2),new i("iţi",-1,2),new i("âţi",-1,2),new i("arăţi",-1,1),new i("serăţi",-1,2),new i("aserăţi",45,1),new i("seserăţi",45,2),new i("iserăţi",45,1),new i("userăţi",45,1),new i("âserăţi",45,1),new i("irăţi",-1,1),new i("urăţi",-1,1),new i("ârăţi",-1,1),new i("am",-1,1),new i("eam",54,1),new i("iam",54,1),new i("em",-1,2),new i("asem",57,1),new i("sesem",57,2),new i("isem",57,1),new i("usem",57,1),new i("âsem",57,1),new i("im",-1,2),new i("âm",-1,2),new i("ăm",-1,2),new i("arăm",65,1),new i("serăm",65,2),new i("aserăm",67,1),new i("seserăm",67,2),new i("iserăm",67,1),new i("userăm",67,1),new i("âserăm",67,1),new i("irăm",65,1),new i("urăm",65,1),new i("ârăm",65,1),new i("au",-1,1),new i("eau",76,1),new i("iau",76,1),new i("indu",-1,1),new i("ându",-1,1),new i("ez",-1,1),new i("ească",-1,1),new i("ară",-1,1),new i("seră",-1,2),new i("aseră",84,1),new i("seseră",84,2),new i("iseră",84,1),new i("useră",84,1),new i("âseră",84,1),new i("iră",-1,1),new i("ură",-1,1),new i("âră",-1,1),new i("ează",-1,1)],g=[new i("a",-1,1),new i("e",-1,1),new i("ie",1,1),new i("i",-1,1),new i("ă",-1,1)],k=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4],h=new r;this.setCurrent=function(e){h.setCurrent(e)},this.getCurrent=function(){return h.getCurrent()},this.stem=function(){var i=h.cursor;return function(){for(var 
i,r;i=h.cursor,h.in_grouping(k,97,259)&&(r=h.cursor,h.bra=r,e("u","U"),h.cursor=r,e("i","I")),h.cursor=i,!(h.cursor>=h.limit);)h.cursor++}(),h.cursor=i,function(){var e=h.cursor;f=h.limit,l=f,m=f,t(),h.cursor=e,a()&&(l=h.cursor,a()&&(m=h.cursor))}(),h.limit_backward=i,h.cursor=h.limit,function(){var e,i;if(h.ket=h.cursor,(e=h.find_among_b(d,16))&&(h.bra=h.cursor,o()))switch(e){case 1:h.slice_del();break;case 2:h.slice_from("a");break;case 3:h.slice_from("e");break;case 4:h.slice_from("i");break;case 5:i=h.limit-h.cursor,h.eq_s_b(2,"ab")||(h.cursor=h.limit-i,h.slice_from("i"));break;case 6:h.slice_from("at");break;case 7:h.slice_from("aţi")}}(),h.cursor=h.limit,c(),h.cursor=h.limit,w||(h.cursor=h.limit,function(){var e,i,r;if(h.cursor>=f){if(i=h.limit_backward,h.limit_backward=f,h.ket=h.cursor,e=h.find_among_b(_,94))switch(h.bra=h.cursor,e){case 1:if(r=h.limit-h.cursor,!h.out_grouping_b(k,97,259)&&(h.cursor=h.limit-r,!h.eq_s_b(1,"u")))break;case 2:h.slice_del()}h.limit_backward=i}}(),h.cursor=h.limit),u(),h.cursor=h.limit_backward,function(){for(var e;;){if(h.bra=h.cursor,e=h.find_among(p,3))switch(h.ket=h.cursor,e){case 1:h.slice_from("i");continue;case 2:h.slice_from("u");continue;case 3:if(h.cursor>=h.limit)break;h.cursor++;continue}break}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.ro.stemmer,"stemmer-ro"),e.ro.stopWordFilter=e.generateStopWordFilter("acea aceasta această aceea acei aceia acel acela acele acelea acest acesta aceste acestea aceşti aceştia acolo acord acum ai aia aibă aici al ale alea altceva altcineva am ar are asemenea asta astea astăzi asupra au avea avem aveţi azi aş aşadar aţi bine bucur bună ca care caut ce cel ceva chiar cinci cine cineva contra cu cum cumva curând curînd când cât câte câtva câţi cînd cît cîte cîtva cîţi că căci cărei căror cărui către da dacă dar datorită dată dau de deci deja deoarece departe deşi din dinaintea dintr- dintre doi doilea două drept după dă ea ei el ele eram este eu eşti face fata fi fie fiecare fii fim fiu fiţi frumos fără graţie halbă iar ieri la le li lor lui lângă lîngă mai mea mei mele mereu meu mi mie mine mult multă mulţi mulţumesc mâine mîine mă ne nevoie nici nicăieri nimeni nimeri nimic nişte noastre noastră noi noroc nostru nouă noştri nu opt ori oricare orice oricine oricum oricând oricât oricînd oricît oriunde patra patru patrulea pe pentru peste pic poate pot prea prima primul prin puţin puţina puţină până pînă rog sa sale sau se spate spre sub sunt suntem sunteţi sută sînt sîntem sînteţi să săi său ta tale te timp tine toate toată tot totuşi toţi trei treia treilea tu tăi tău un una unde undeva unei uneia unele uneori unii unor unora unu unui unuia unul vi voastre voastră voi vostru vouă voştri vreme vreo vreun vă zece zero zi zice îi îl îmi împotriva în înainte înaintea încotro încât încît între întrucât întrucît îţi ăla ălea ăsta ăstea ăştia şapte şase şi ştiu ţi ţie".split(" ")),e.Pipeline.registerFunction(e.ro.stopWordFilter,"stopWordFilter-ro")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.ru.js b/docs/assets/javascripts/lunr/lunr.ru.js new file mode 100644 index 000000000..11d4e517d --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.ru.js @@ -0,0 +1 @@ +!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 
0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ru=function(){this.pipeline.reset(),this.pipeline.add(e.ru.trimmer,e.ru.stopWordFilter,e.ru.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ru.stemmer))},e.ru.wordCharacters="Ѐ-҄҇-ԯᴫᵸⷠ-ⷿꙀ-ꚟ︮︯",e.ru.trimmer=e.trimmerSupport.generateTrimmer(e.ru.wordCharacters),e.Pipeline.registerFunction(e.ru.trimmer,"trimmer-ru"),e.ru.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,t=new function(){function e(){for(;!g.in_grouping(h,1072,1103);){if(g.cursor>=g.limit)return!1;g.cursor++}return!0}function t(){for(;!g.out_grouping(h,1072,1103);){if(g.cursor>=g.limit)return!1;g.cursor++}return!0}function w(e,n){var r,t;if(g.ket=g.cursor,r=g.find_among_b(e,n)){switch(g.bra=g.cursor,r){case 1:if(t=g.limit-g.cursor,!g.eq_s_b(1,"а")&&(g.cursor=g.limit-t,!g.eq_s_b(1,"я")))return!1;case 2:g.slice_del()}return!0}return!1}function i(e,n){var r;return g.ket=g.cursor,!!(r=g.find_among_b(e,n))&&(g.bra=g.cursor,1==r&&g.slice_del(),!0)}function u(){return!!i(l,26)&&(w(f,8),!0)}function s(){var e;g.ket=g.cursor,(e=g.find_among_b(_,2))&&(g.bra=g.cursor,o<=g.cursor&&1==e&&g.slice_del())}var o,c,m=[new n("в",-1,1),new n("ив",0,2),new n("ыв",0,2),new n("вши",-1,1),new n("ивши",3,2),new n("ывши",3,2),new n("вшись",-1,1),new n("ившись",6,2),new n("ывшись",6,2)],l=[new n("ее",-1,1),new n("ие",-1,1),new n("ое",-1,1),new n("ые",-1,1),new n("ими",-1,1),new n("ыми",-1,1),new n("ей",-1,1),new n("ий",-1,1),new n("ой",-1,1),new n("ый",-1,1),new n("ем",-1,1),new n("им",-1,1),new n("ом",-1,1),new n("ым",-1,1),new n("его",-1,1),new n("ого",-1,1),new n("ему",-1,1),new n("ому",-1,1),new n("их",-1,1),new n("ых",-1,1),new n("ею",-1,1),new n("ою",-1,1),new n("ую",-1,1),new n("юю",-1,1),new n("ая",-1,1),new n("яя",-1,1)],f=[new n("ем",-1,1),new n("нн",-1,1),new n("вш",-1,1),new n("ивш",2,2),new n("ывш",2,2),new n("щ",-1,1),new n("ющ",5,1),new n("ующ",6,2)],a=[new n("сь",-1,1),new n("ся",-1,1)],p=[new n("ла",-1,1),new n("ила",0,2),new n("ыла",0,2),new n("на",-1,1),new n("ена",3,2),new n("ете",-1,1),new n("ите",-1,2),new n("йте",-1,1),new n("ейте",7,2),new n("уйте",7,2),new n("ли",-1,1),new n("или",10,2),new n("ыли",10,2),new n("й",-1,1),new n("ей",13,2),new n("уй",13,2),new n("л",-1,1),new n("ил",16,2),new n("ыл",16,2),new n("ем",-1,1),new n("им",-1,2),new n("ым",-1,2),new n("н",-1,1),new n("ен",22,2),new n("ло",-1,1),new n("ило",24,2),new n("ыло",24,2),new n("но",-1,1),new n("ено",27,2),new n("нно",27,1),new n("ет",-1,1),new n("ует",30,2),new n("ит",-1,2),new n("ыт",-1,2),new n("ют",-1,1),new n("уют",34,2),new n("ят",-1,2),new n("ны",-1,1),new n("ены",37,2),new n("ть",-1,1),new n("ить",39,2),new n("ыть",39,2),new n("ешь",-1,1),new n("ишь",-1,2),new n("ю",-1,2),new n("ую",44,2)],d=[new n("а",-1,1),new n("ев",-1,1),new n("ов",-1,1),new n("е",-1,1),new n("ие",3,1),new n("ье",3,1),new n("и",-1,1),new n("еи",6,1),new n("ии",6,1),new n("ами",6,1),new n("ями",6,1),new n("иями",10,1),new n("й",-1,1),new n("ей",12,1),new n("ией",13,1),new n("ий",12,1),new n("ой",12,1),new n("ам",-1,1),new n("ем",-1,1),new n("ием",18,1),new n("ом",-1,1),new n("ям",-1,1),new n("иям",21,1),new n("о",-1,1),new n("у",-1,1),new n("ах",-1,1),new n("ях",-1,1),new n("иях",26,1),new n("ы",-1,1),new n("ь",-1,1),new n("ю",-1,1),new n("ию",30,1),new n("ью",30,1),new 
n("я",-1,1),new n("ия",33,1),new n("ья",33,1)],_=[new n("ост",-1,1),new n("ость",-1,1)],b=[new n("ейше",-1,1),new n("н",-1,2),new n("ейш",-1,1),new n("ь",-1,3)],h=[33,65,8,232],g=new r;this.setCurrent=function(e){g.setCurrent(e)},this.getCurrent=function(){return g.getCurrent()},this.stem=function(){return c=g.limit,o=c,e()&&(c=g.cursor,t()&&e()&&t()&&(o=g.cursor)),g.cursor=g.limit,!(g.cursor=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursors||e>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor>1),f=0,l=o0||e==s||c)break;c=!0}}for(;;){if(o>=(_=t[s]).s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o=0;_--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-m.s[_])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var m=t[s];if(o>=m.s_size){if(this.cursor=n-m.s_size,!m.method)return m.result;var b=m.method();if(this.cursor=n-m.s_size,b)return m.result}if((s=m.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.sv.js b/docs/assets/javascripts/lunr/lunr.sv.js new file mode 100644 index 000000000..70211fd77 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.sv.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){var e,t,i=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],s=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],o=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],u=[119,127,149],c=new n;this.setCurrent=function(e){c.setCurrent(e)},this.getCurrent=function(){return c.getCurrent()},this.stem=function(){var r=c.cursor;return function(){var r,n=c.cursor+3;if(t=c.limit,0<=n||n<=c.limit){for(e=n;;){if(r=c.cursor,c.in_grouping(o,97,246)){c.cursor=r;break}if(c.cursor=r,c.cursor>=c.limit)return;c.cursor++}for(;!c.out_grouping(o,97,246);){if(c.cursor>=c.limit)return;c.cursor++}(t=c.cursor)=t&&(c.limit_backward=t,c.cursor=c.limit,c.ket=c.cursor,e=c.find_among_b(i,37),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del();break;case 2:c.in_grouping_b(u,98,121)&&c.slice_del()}}(),c.cursor=c.limit,function(){var e=c.limit_backward;c.cursor>=t&&(c.limit_backward=t,c.cursor=c.limit,c.find_among_b(s,7)&&(c.cursor=c.limit,c.ket=c.cursor,c.cursor>c.limit_backward&&(c.bra=--c.cursor,c.slice_del())),c.limit_backward=e)}(),c.cursor=c.limit,function(){var e,r;if(c.cursor>=t){if(r=c.limit_backward,c.limit_backward=t,c.cursor=c.limit,c.ket=c.cursor,e=c.find_among_b(a,5))switch(c.bra=c.cursor,e){case 1:c.slice_del();break;case 2:c.slice_from("lös");break;case 3:c.slice_from("full")}c.limit_backward=r}}(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}}(),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" 
")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}}); \ No newline at end of file diff --git a/docs/assets/javascripts/lunr/lunr.tr.js b/docs/assets/javascripts/lunr/lunr.tr.js new file mode 100644 index 000000000..db7c908a5 --- /dev/null +++ b/docs/assets/javascripts/lunr/lunr.tr.js @@ -0,0 +1 @@ +!function(r,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(r.lunr)}(this,function(){return function(r){if(void 0===r)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");r.tr=function(){this.pipeline.reset(),this.pipeline.add(r.tr.trimmer,r.tr.stopWordFilter,r.tr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(r.tr.stemmer))},r.tr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",r.tr.trimmer=r.trimmerSupport.generateTrimmer(r.tr.wordCharacters),r.Pipeline.registerFunction(r.tr.trimmer,"trimmer-tr"),r.tr.stemmer=function(){var i=r.stemmerSupport.Among,e=r.stemmerSupport.SnowballProgram,n=new function(){function r(r,i,e){for(;;){var n=fr.limit-fr.cursor;if(fr.in_grouping_b(r,i,e)){fr.cursor=fr.limit-n;break}if(fr.cursor=fr.limit-n,fr.cursor<=fr.limit_backward)return!1;fr.cursor--}return!0}function n(){var i,e;i=fr.limit-fr.cursor,r(cr,97,305);for(var n=0;nfr.limit_backward&&(fr.cursor--,e=fr.limit-fr.cursor,i()))?(fr.cursor=fr.limit-e,!0):(fr.cursor=fr.limit-n,r()?(fr.cursor=fr.limit-n,!1):(fr.cursor=fr.limit-n,!(fr.cursor<=fr.limit_backward)&&(fr.cursor--,!!i()&&(fr.cursor=fr.limit-n,!0))))}function u(r){return t(r,function(){return fr.in_grouping_b(cr,97,305)})}function o(){return u(function(){return fr.eq_s_b(1,"n")})}function s(){return u(function(){return fr.eq_s_b(1,"y")})}function c(){return fr.find_among_b(D,10)&&t(function(){return fr.in_grouping_b(lr,105,305)},function(){return fr.out_grouping_b(cr,97,305)})}function l(){return n()&&fr.in_grouping_b(lr,105,305)&&u(function(){return fr.eq_s_b(1,"s")})}function a(){return fr.find_among_b(G,2)}function m(){return n()&&fr.find_among_b(I,4)&&o()}function d(){return n()&&fr.find_among_b(M,4)}function f(){return n()&&fr.find_among_b(N,2)}function b(){return n()&&fr.find_among_b(V,4)&&s()}function w(){return n()&&fr.find_among_b(X,4)}function _(){return n()&&fr.find_among_b(Y,4)&&s()}function k(){return fr.find_among_b($,4)}function p(){return n()&&fr.find_among_b(rr,2)}function g(){return n()&&fr.find_among_b(er,8)}function y(){return n()&&fr.find_among_b(tr,32)&&s()}function z(){return fr.find_among_b(ur,8)&&s()}function v(){return n()&&fr.find_among_b(or,4)&&s()}function h(){var r=fr.limit-fr.cursor;return!(v()||(fr.cursor=fr.limit-r,y()||(fr.cursor=fr.limit-r,z()||(fr.cursor=fr.limit-r,fr.eq_s_b(3,"ken")&&s()))))}function q(){if(fr.find_among_b(nr,2)){var r=fr.limit-fr.cursor;if(k()||(fr.cursor=fr.limit-r,p()||(fr.cursor=fr.limit-r,b()||(fr.cursor=fr.limit-r,w()||(fr.cursor=fr.limit-r,_()||(fr.cursor=fr.limit-r))))),v())return!1}return!0}function C(){if(!n()||!fr.find_among_b(ir,4))return!0;var r=fr.limit-fr.cursor;return!y()&&(fr.cursor=fr.limit-r,!z())}function P(){var r,i=fr.limit-fr.cursor;if(fr.ket=fr.cursor,Z=!0,h()&&(fr.cursor=fr.limit-i,q()&&(fr.cursor=fr.limit-i,function(){if(p()){fr.bra=fr.cursor,fr.slice_del();var r=fr.limit-fr.cursor;return 
fr.ket=fr.cursor,g()||(fr.cursor=fr.limit-r,y()||(fr.cursor=fr.limit-r,z()||(fr.cursor=fr.limit-r,v()||(fr.cursor=fr.limit-r)))),Z=!1,!1}return!0}()&&(fr.cursor=fr.limit-i,C()&&(fr.cursor=fr.limit-i,function(){var r,i=fr.limit-fr.cursor;return!(k()||(fr.cursor=fr.limit-i,_()||(fr.cursor=fr.limit-i,w()||(fr.cursor=fr.limit-i,b()))))||(fr.bra=fr.cursor,fr.slice_del(),r=fr.limit-fr.cursor,fr.ket=fr.cursor,v()||(fr.cursor=fr.limit-r),!1)}()))))){if(fr.cursor=fr.limit-i,!g())return;fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,r=fr.limit-fr.cursor,k()||(fr.cursor=fr.limit-r,p()||(fr.cursor=fr.limit-r,b()||(fr.cursor=fr.limit-r,w()||(fr.cursor=fr.limit-r,_()||(fr.cursor=fr.limit-r))))),v()||(fr.cursor=fr.limit-r)}fr.bra=fr.cursor,fr.slice_del()}function F(){var r,i,e,n;if(fr.ket=fr.cursor,fr.eq_s_b(2,"ki")){if(r=fr.limit-fr.cursor,d())return fr.bra=fr.cursor,fr.slice_del(),i=fr.limit-fr.cursor,fr.ket=fr.cursor,p()?(fr.bra=fr.cursor,fr.slice_del(),F()):(fr.cursor=fr.limit-i,c()&&(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F()))),!0;if(fr.cursor=fr.limit-r,m()){if(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,e=fr.limit-fr.cursor,a())fr.bra=fr.cursor,fr.slice_del();else{if(fr.cursor=fr.limit-e,fr.ket=fr.cursor,!c()&&(fr.cursor=fr.limit-e,!l()&&(fr.cursor=fr.limit-e,!F())))return!0;fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F())}return!0}if(fr.cursor=fr.limit-r,f()){if(n=fr.limit-fr.cursor,a())fr.bra=fr.cursor,fr.slice_del();else if(fr.cursor=fr.limit-n,l())fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F());else if(fr.cursor=fr.limit-n,!F())return!1;return!0}}return!1}function S(r){if(fr.ket=fr.cursor,!f()&&(fr.cursor=fr.limit-r,!n()||!fr.find_among_b(K,2)))return!1;var i=fr.limit-fr.cursor;if(a())fr.bra=fr.cursor,fr.slice_del();else if(fr.cursor=fr.limit-i,l())fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F());else if(fr.cursor=fr.limit-i,!F())return!1;return!0}function W(r){if(fr.ket=fr.cursor,!(n()&&fr.find_among_b(Q,2)||(fr.cursor=fr.limit-r,n()&&fr.find_among_b(H,4))))return!1;var i=fr.limit-fr.cursor;return!(!l()&&(fr.cursor=fr.limit-i,!a()))&&(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F()),!0)}function L(){var r,i=fr.limit-fr.cursor;return fr.ket=fr.cursor,!!(m()||(fr.cursor=fr.limit-i,n()&&fr.find_among_b(R,2)&&s()))&&(fr.bra=fr.cursor,fr.slice_del(),r=fr.limit-fr.cursor,fr.ket=fr.cursor,!(!p()||(fr.bra=fr.cursor,fr.slice_del(),!F()))||(fr.cursor=fr.limit-r,fr.ket=fr.cursor,!(c()||(fr.cursor=fr.limit-r,l()||(fr.cursor=fr.limit-r,F())))||(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F()),!0)))}function x(){var r,i,e=fr.limit-fr.cursor;if(fr.ket=fr.cursor,!(d()||(fr.cursor=fr.limit-e,n()&&fr.in_grouping_b(lr,105,305)&&s()||(fr.cursor=fr.limit-e,n()&&fr.find_among_b(J,2)&&s()))))return!1;if(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,r=fr.limit-fr.cursor,c())fr.bra=fr.cursor,fr.slice_del(),i=fr.limit-fr.cursor,fr.ket=fr.cursor,p()||(fr.cursor=fr.limit-i);else if(fr.cursor=fr.limit-r,!p())return!0;return fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,F(),!0}function A(){var r,i,e=fr.limit-fr.cursor;if(fr.ket=fr.cursor,p())return fr.bra=fr.cursor,fr.slice_del(),void 
F();if(fr.cursor=fr.limit-e,fr.ket=fr.cursor,n()&&fr.find_among_b(U,2)&&o())if(fr.bra=fr.cursor,fr.slice_del(),r=fr.limit-fr.cursor,fr.ket=fr.cursor,a())fr.bra=fr.cursor,fr.slice_del();else{if(fr.cursor=fr.limit-r,fr.ket=fr.cursor,!c()&&(fr.cursor=fr.limit-r,!l())){if(fr.cursor=fr.limit-r,fr.ket=fr.cursor,!p())return;if(fr.bra=fr.cursor,fr.slice_del(),!F())return}fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F())}else if(fr.cursor=fr.limit-e,!S(e)&&(fr.cursor=fr.limit-e,!W(e))){if(fr.cursor=fr.limit-e,fr.ket=fr.cursor,n()&&fr.find_among_b(O,4))return fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,i=fr.limit-fr.cursor,void(c()?(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F())):(fr.cursor=fr.limit-i,p()?(fr.bra=fr.cursor,fr.slice_del(),F()):(fr.cursor=fr.limit-i,F())));if(fr.cursor=fr.limit-e,!L()){if(fr.cursor=fr.limit-e,a())return fr.bra=fr.cursor,void fr.slice_del();fr.cursor=fr.limit-e,F()||(fr.cursor=fr.limit-e,x()||(fr.cursor=fr.limit-e,fr.ket=fr.cursor,(c()||(fr.cursor=fr.limit-e,l()))&&(fr.bra=fr.cursor,fr.slice_del(),fr.ket=fr.cursor,p()&&(fr.bra=fr.cursor,fr.slice_del(),F()))))}}}function E(r,i,e){if(fr.cursor=fr.limit-r,function(){for(;;){var r=fr.limit-fr.cursor;if(fr.in_grouping_b(cr,97,305)){fr.cursor=fr.limit-r;break}if(fr.cursor=fr.limit-r,fr.cursor<=fr.limit_backward)return!1;fr.cursor--}return!0}()){var n=fr.limit-fr.cursor;if(!fr.eq_s_b(1,i)&&(fr.cursor=fr.limit-n,!fr.eq_s_b(1,e)))return!0;fr.cursor=fr.limit-r;var t=fr.cursor;return fr.insert(fr.cursor,fr.cursor,e),fr.cursor=t,!1}return!0}function j(r,i,e){for(;!fr.eq_s(i,e);){if(fr.cursor>=fr.limit)return!0;fr.cursor++}return(B=i)!=fr.limit||(fr.cursor=r,!1)}function T(){var r=fr.cursor;return!function(){var r=fr.cursor;return!j(r,2,"ad")||(fr.cursor=r,!j(r,5,"soyad"))}()&&(fr.limit_backward=r,fr.cursor=fr.limit,function(){var r=fr.limit-fr.cursor;(fr.eq_s_b(1,"d")||(fr.cursor=fr.limit-r,fr.eq_s_b(1,"g")))&&E(r,"a","ı")&&E(r,"e","i")&&E(r,"o","u")&&E(r,"ö","ü")}(),fr.cursor=fr.limit,function(){var r;if(fr.ket=fr.cursor,r=fr.find_among_b(sr,4))switch(fr.bra=fr.cursor,r){case 1:fr.slice_from("p");break;case 2:fr.slice_from("ç");break;case 3:fr.slice_from("t");break;case 4:fr.slice_from("k")}}(),!0)}var Z,B,D=[new i("m",-1,-1),new i("n",-1,-1),new i("miz",-1,-1),new i("niz",-1,-1),new i("muz",-1,-1),new i("nuz",-1,-1),new i("müz",-1,-1),new i("nüz",-1,-1),new i("mız",-1,-1),new i("nız",-1,-1)],G=[new i("leri",-1,-1),new i("ları",-1,-1)],H=[new i("ni",-1,-1),new i("nu",-1,-1),new i("nü",-1,-1),new i("nı",-1,-1)],I=[new i("in",-1,-1),new i("un",-1,-1),new i("ün",-1,-1),new i("ın",-1,-1)],J=[new i("a",-1,-1),new i("e",-1,-1)],K=[new i("na",-1,-1),new i("ne",-1,-1)],M=[new i("da",-1,-1),new i("ta",-1,-1),new i("de",-1,-1),new i("te",-1,-1)],N=[new i("nda",-1,-1),new i("nde",-1,-1)],O=[new i("dan",-1,-1),new i("tan",-1,-1),new i("den",-1,-1),new i("ten",-1,-1)],Q=[new i("ndan",-1,-1),new i("nden",-1,-1)],R=[new i("la",-1,-1),new i("le",-1,-1)],U=[new i("ca",-1,-1),new i("ce",-1,-1)],V=[new i("im",-1,-1),new i("um",-1,-1),new i("üm",-1,-1),new i("ım",-1,-1)],X=[new i("sin",-1,-1),new i("sun",-1,-1),new i("sün",-1,-1),new i("sın",-1,-1)],Y=[new i("iz",-1,-1),new i("uz",-1,-1),new i("üz",-1,-1),new i("ız",-1,-1)],$=[new i("siniz",-1,-1),new i("sunuz",-1,-1),new i("sünüz",-1,-1),new i("sınız",-1,-1)],rr=[new i("lar",-1,-1),new i("ler",-1,-1)],ir=[new i("niz",-1,-1),new i("nuz",-1,-1),new i("nüz",-1,-1),new i("nız",-1,-1)],er=[new i("dir",-1,-1),new 
i("tir",-1,-1),new i("dur",-1,-1),new i("tur",-1,-1),new i("dür",-1,-1),new i("tür",-1,-1),new i("dır",-1,-1),new i("tır",-1,-1)],nr=[new i("casına",-1,-1),new i("cesine",-1,-1)],tr=[new i("di",-1,-1),new i("ti",-1,-1),new i("dik",-1,-1),new i("tik",-1,-1),new i("duk",-1,-1),new i("tuk",-1,-1),new i("dük",-1,-1),new i("tük",-1,-1),new i("dık",-1,-1),new i("tık",-1,-1),new i("dim",-1,-1),new i("tim",-1,-1),new i("dum",-1,-1),new i("tum",-1,-1),new i("düm",-1,-1),new i("tüm",-1,-1),new i("dım",-1,-1),new i("tım",-1,-1),new i("din",-1,-1),new i("tin",-1,-1),new i("dun",-1,-1),new i("tun",-1,-1),new i("dün",-1,-1),new i("tün",-1,-1),new i("dın",-1,-1),new i("tın",-1,-1),new i("du",-1,-1),new i("tu",-1,-1),new i("dü",-1,-1),new i("tü",-1,-1),new i("dı",-1,-1),new i("tı",-1,-1)],ur=[new i("sa",-1,-1),new i("se",-1,-1),new i("sak",-1,-1),new i("sek",-1,-1),new i("sam",-1,-1),new i("sem",-1,-1),new i("san",-1,-1),new i("sen",-1,-1)],or=[new i("miş",-1,-1),new i("muş",-1,-1),new i("müş",-1,-1),new i("mış",-1,-1)],sr=[new i("b",-1,1),new i("c",-1,2),new i("d",-1,3),new i("ğ",-1,4)],cr=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1],lr=[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1],ar=[65],mr=[65],dr=[["a",[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],97,305],["e",[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130],101,252],["ı",[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],97,305],["i",[17],101,105],["o",ar,111,117],["ö",mr,246,252],["u",ar,111,117]],fr=new e;this.setCurrent=function(r){fr.setCurrent(r)},this.getCurrent=function(){return fr.getCurrent()},this.stem=function(){return!!(function(){for(var r,i=fr.cursor,e=2;;){for(r=fr.cursor;!fr.in_grouping(cr,97,305);){if(fr.cursor>=fr.limit)return fr.cursor=r,!(e>0||(fr.cursor=i,0));fr.cursor++}e--}}()&&(fr.limit_backward=fr.cursor,fr.cursor=fr.limit,P(),fr.cursor=fr.limit,Z&&(A(),fr.cursor=fr.limit_backward,T())))}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.tr.stemmer,"stemmer-tr"),r.tr.stopWordFilter=r.generateStopWordFilter("acaba altmış altı ama ancak arada aslında ayrıca bana bazı belki ben benden beni benim beri beş bile bin bir biri birkaç birkez birçok birşey birşeyi biz bizden bize bizi bizim bu buna bunda bundan bunlar bunları bunların bunu bunun burada böyle böylece da daha dahi de defa değil diye diğer doksan dokuz dolayı dolayısıyla dört edecek eden ederek edilecek ediliyor edilmesi ediyor elli en etmesi etti ettiği ettiğini eğer gibi göre halen hangi hatta hem henüz hep hepsi her herhangi herkesin hiç hiçbir iki ile ilgili ise itibaren itibariyle için işte kadar karşın katrilyon kendi kendilerine kendini kendisi kendisine kendisini kez ki kim kimden kime kimi kimse kırk milyar milyon mu mü mı nasıl ne neden nedenle nerde nerede nereye niye niçin o olan olarak oldu olduklarını olduğu olduğunu olmadı olmadığı olmak olması olmayan olmaz olsa olsun olup olur olursa oluyor on ona ondan onlar onlardan onları onların onu onun otuz oysa pek rağmen sadece sanki sekiz seksen sen senden seni senin siz sizden sizi sizin tarafından trilyon tüm var vardı ve veya ya yani yapacak yapmak yaptı yaptıkları yaptığı yaptığını yapılan yapılması yapıyor yedi yerine yetmiş yine yirmi yoksa yüz zaten çok çünkü öyle üzere üç şey şeyden şeyi şeyler şu şuna şunda şundan şunları şunu şöyle".split(" ")),r.Pipeline.registerFunction(r.tr.stopWordFilter,"stopWordFilter-tr")}}); \ No 
newline at end of file diff --git a/docs/assets/javascripts/lunr/tinyseg.js b/docs/assets/javascripts/lunr/tinyseg.js new file mode 100644 index 000000000..cce65c1e7 --- /dev/null +++ b/docs/assets/javascripts/lunr/tinyseg.js @@ -0,0 +1 @@ +!function(_,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(_.lunr)}(this,function(){return function(_){function t(){var _={"[一二三四五六七八九十百千万億兆]":"M","[一-龠々〆ヵヶ]":"H","[ぁ-ん]":"I","[ァ-ヴーア-ン゙ー]":"K","[a-zA-Za-zA-Z]":"A","[0-90-9]":"N"};this.chartype_=[];for(var t in _){var H=new RegExp;H.compile(t),this.chartype_.push([H,_[t]])}return this.BIAS__=-332,this.BC1__={HH:6,II:2461,KH:406,OH:-1378},this.BC2__={AA:-3267,AI:2744,AN:-878,HH:-4070,HM:-1711,HN:4012,HO:3761,IA:1327,IH:-1184,II:-1332,IK:1721,IO:5492,KI:3831,KK:-8741,MH:-3132,MK:3334,OO:-2920},this.BC3__={HH:996,HI:626,HK:-721,HN:-1307,HO:-836,IH:-301,KK:2762,MK:1079,MM:4034,OA:-1652,OH:266},this.BP1__={BB:295,OB:304,OO:-125,UB:352},this.BP2__={BO:60,OO:-1762},this.BQ1__={BHH:1150,BHM:1521,BII:-1158,BIM:886,BMH:1208,BNH:449,BOH:-91,BOO:-2597,OHI:451,OIH:-296,OKA:1851,OKH:-1020,OKK:904,OOO:2965},this.BQ2__={BHH:118,BHI:-1159,BHM:466,BIH:-919,BKK:-1720,BKO:864,OHH:-1139,OHM:-181,OIH:153,UHI:-1146},this.BQ3__={BHH:-792,BHI:2664,BII:-299,BKI:419,BMH:937,BMM:8335,BNN:998,BOH:775,OHH:2174,OHM:439,OII:280,OKH:1798,OKI:-793,OKO:-2242,OMH:-2402,OOO:11699},this.BQ4__={BHH:-3895,BIH:3761,BII:-4654,BIK:1348,BKK:-1806,BMI:-3385,BOO:-12396,OAH:926,OHH:266,OHK:-2036,ONN:-973},this.BW1__={",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682},this.BW2__={"..":-11822,11:-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669},this.BW3__={"あた":-2194,"あり":7
19,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-2757,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1e3,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990},this.TC1__={AAA:1093,HHH:1029,HHM:580,HII:998,HOH:-390,HOM:-331,IHI:1169,IOH:-142,IOI:-1015,IOM:467,MMH:187,OOI:-1832},this.TC2__={HHO:2088,HII:-1023,HMM:-1154,IHI:-1965,KKH:703,OII:-2649},this.TC3__={AAA:-294,HHH:346,HHI:-341,HII:-1088,HIK:731,HOH:-1486,IHH:128,IHI:-3041,IHO:-1935,IIH:-825,IIM:-1035,IOI:-542,KHH:-1216,KKA:491,KKH:-1217,KOK:-1009,MHH:-2694,MHM:-457,MHO:123,MMH:-471,NNH:-1689,NNO:662,OHO:-3393},this.TC4__={HHH:-203,HHI:1344,HHK:365,HHM:-122,HHN:182,HHO:669,HIH:804,HII:679,HOH:446,IHH:695,IHO:-2324,IIH:321,III:1497,IIO:656,IOO:54,KAK:4845,KKA:3386,KKK:3065,MHH:-405,MHI:201,MMH:-241,MMM:661,MOM:841},this.TQ1__={BHHH:-227,BHHI:316,BHIH:-132,BIHH:60,BIII:1595,BNHH:-744,BOHH:225,BOOO:-908,OAKK:482,OHHH:281,OHIH:249,OIHI:200,OIIH:-68},this.TQ2__={BIHH:-1401,BIII:-1033,BKAK:-543,BOOO:-5591},this.TQ3__={BHHH:478,BHHM:-1073,BHIH:222,BHII:-504,BIIH:-116,BIII:-105,BMHI:-863,BMHM:-464,BOMH:620,OHHH:346,OHHI:1729,OHII:997,OHMH:481,OIHH:623,OIIH:1344,OKAK:2792,OKHH:587,OKKA:679,OOHH:110,OOII:-685},this.TQ4__={BHHH:-721,BHHM:-3604,BHII:-966,BIIH:-607,BIII:-2181,OAAA:-2763,OAKK:180,OHHH:-294,OHHI:2446,OHHO:480,OHIH:-1573,OIHH:1935,OIHI:-493,OIIH:626,OIII:-4007,OKAK:-8156},this.TW1__={"につい":-4681,"東京都":2026},this.TW2__={"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216},this.TW3__={"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287},this.TW4__={"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865},this.UC1__={A:484,K:93,M:645,O:-505},this.UC2__={A:819,H:1059,I:409,M:3987,N:5775,O:646},this.UC3__={A:-1370,I:2311},this.UC4__={A:-2643,H:1809,I:-1032,K:-3450,M:3565,N:3876,O:6646},this.UC5__={H:313,I:-1238,K:-799,M:539,O:-831},this.UC6__={H:-506,I:-253,K:87,M:247,O:-387},this.UP1__={O:-214},this.UP2__={B:69,O:935},this.UP3__={B:189},this.UQ1__={BH:21,BI:-12,BK:-99,BN:142,BO:-56,OH:-95,OI:477,OK:410,OO:-2422},this.UQ2__={BH:216,BI:113,OK:1759},this.UQ3__={BA:-479,BH:42,BI:1913,BK:-7198,BM:3160,BN:6427,BO:14761,OI:-827,ON:-3212},this.UW1__={",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,
"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135},this.UW2__={",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ":300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568},this.UW3__={",":4889,1:-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-3794,"ッ":-1350,"ア":551,"グ":1319,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278},this.UW4__={",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば
":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1e3,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637},this.UW5__={",":465,".":-299,1:-514,E2:-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343},this.UW6__={",":227,".":808,1:-270,E1:306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496},this}t.prototype.ctype_=function(_){for(var t in this.chartype_)if(_.match(this.chartype_[t][0]))return this.chartype_[t][1];return"O"},t.prototype.ts_=function(_){return _||0},t.prototype.segment=function(_){if(null==_||void 0==_||""==_)return[];var t=[],H=["B3","B2","B1"],s=["O","O","O"],h=_.split("");for(K=0;K0&&(t.push(i),i="",N="B"),I=O,O=B,B=N,i+=H[K]}return t.push(i),t},_.TinySegmenter=t}}); \ No newline at end of file diff --git a/docs/assets/javascripts/modernizr.1aa3b519.js b/docs/assets/javascripts/modernizr.1aa3b519.js new file mode 100644 index 
000000000..14e111fc3 --- /dev/null +++ b/docs/assets/javascripts/modernizr.1aa3b519.js @@ -0,0 +1 @@ +!function(e,t){for(var n in t)e[n]=t[n]}(window,function(e){function t(r){if(n[r])return n[r].exports;var o=n[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,t),o.l=!0,o.exports}var n={};return t.m=e,t.c=n,t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="",t(t.s=4)}({4:function(e,t,n){"use strict";n(5)},5:function(e,t){!function(t){!function(e,t,n){function r(e,t){return typeof e===t}function o(e){var t=_.className,n=C._config.classPrefix||"";if(T&&(t=t.baseVal),C._config.enableJSClass){var r=new RegExp("(^|\\s)"+n+"no-js(\\s|$)");t=t.replace(r,"$1"+n+"js$2")}C._config.enableClasses&&(t+=" "+n+e.join(" "+n),T?_.className.baseVal=t:_.className=t)}function i(e,t){if("object"==typeof e)for(var n in e)b(e,n)&&i(n,e[n]);else{e=e.toLowerCase();var r=e.split("."),s=C[r[0]];if(2==r.length&&(s=s[r[1]]),void 0!==s)return C;t="function"==typeof t?t():t,1==r.length?C[r[0]]=t:(!C[r[0]]||C[r[0]]instanceof Boolean||(C[r[0]]=new Boolean(C[r[0]])),C[r[0]][r[1]]=t),o([(t&&0!=t?"":"no-")+r.join("-")]),C._trigger(e,t)}return C}function s(){return"function"!=typeof t.createElement?t.createElement(arguments[0]):T?t.createElementNS.call(t,"http://www.w3.org/2000/svg",arguments[0]):t.createElement.apply(t,arguments)}function a(){var e=t.body;return e||(e=s(T?"svg":"body"),e.fake=!0),e}function u(e,n,r,o){var i,u,l,f,c="modernizr",d=s("div"),p=a();if(parseInt(r,10))for(;r--;)l=s("div"),l.id=o?o[r]:c+(r+1),d.appendChild(l);return i=s("style"),i.type="text/css",i.id="s"+c,(p.fake?p:d).appendChild(i),p.appendChild(d),i.styleSheet?i.styleSheet.cssText=e:i.appendChild(t.createTextNode(e)),d.id=c,p.fake&&(p.style.background="",p.style.overflow="hidden",f=_.style.overflow,_.style.overflow="hidden",_.appendChild(p)),u=n(d,e),p.fake?(p.parentNode.removeChild(p),_.style.overflow=f,_.offsetHeight):d.parentNode.removeChild(d),!!u}function l(e,t){return!!~(""+e).indexOf(t)}function f(e){return e.replace(/([A-Z])/g,function(e,t){return"-"+t.toLowerCase()}).replace(/^ms-/,"-ms-")}function c(t,n,r){var o;if("getComputedStyle"in e){o=getComputedStyle.call(e,t,n);var i=e.console;if(null!==o)r&&(o=o.getPropertyValue(r));else if(i){var s=i.error?"error":"log";i[s].call(i,"getComputedStyle returning null, its possible modernizr test results are inaccurate")}}else o=!n&&t.currentStyle&&t.currentStyle[r];return o}function d(t,r){var o=t.length;if("CSS"in e&&"supports"in e.CSS){for(;o--;)if(e.CSS.supports(f(t[o]),r))return!0;return!1}if("CSSSupportsRule"in e){for(var i=[];o--;)i.push("("+f(t[o])+":"+r+")");return i=i.join(" or "),u("@supports ("+i+") { #modernizr { position: absolute; } }",function(e){return"absolute"==c(e,null,"position")})}return n}function p(e){return e.replace(/([a-z])-([a-z])/g,function(e,t,n){return t+n.toUpperCase()}).replace(/^-/,"")}function h(e,t,o,i){function a(){f&&(delete j.style,delete j.modElem)}if(i=!r(i,"undefined")&&i,!r(o,"undefined")){var u=d(e,o);if(!r(u,"undefined"))return u}for(var f,c,h,m,v,g=["modernizr","tspan","samp"];!j.style&&g.length;)f=!0,j.modElem=s(g.shift()),j.style=j.modElem.style;for(h=e.length,c=0;h>c;c++)if(m=e[c],v=j.style[m],l(m,"-")&&(m=p(m)),j.style[m]!==n){if(i||r(o,"undefined"))return 
a(),"pfx"!=t||m;try{j.style[m]=o}catch(e){}if(j.style[m]!=v)return a(),"pfx"!=t||m}return a(),!1}function m(e,t){return function(){return e.apply(t,arguments)}}function v(e,t,n){var o;for(var i in e)if(e[i]in t)return!1===n?e[i]:(o=t[e[i]],r(o,"function")?m(o,n||t):o);return!1}function g(e,t,n,o,i){var s=e.charAt(0).toUpperCase()+e.slice(1),a=(e+" "+k.join(s+" ")+s).split(" ");return r(t,"string")||r(t,"undefined")?h(a,t,o,i):(a=(e+" "+A.join(s+" ")+s).split(" "),v(a,t,n))}function y(e,t,r){return g(e,n,n,t,r)}var w=[],S={_version:"3.5.0",_config:{classPrefix:"",enableClasses:!0,enableJSClass:!0,usePrefixes:!0},_q:[],on:function(e,t){var n=this;setTimeout(function(){t(n[e])},0)},addTest:function(e,t,n){w.push({name:e,fn:t,options:n})},addAsyncTest:function(e){w.push({name:null,fn:e})}},C=function(){};C.prototype=S,C=new C;var b,x=[],_=t.documentElement,T="svg"===_.nodeName.toLowerCase();!function(){var e={}.hasOwnProperty;b=r(e,"undefined")||r(e.call,"undefined")?function(e,t){return t in e&&r(e.constructor.prototype[t],"undefined")}:function(t,n){return e.call(t,n)}}(),S._l={},S.on=function(e,t){this._l[e]||(this._l[e]=[]),this._l[e].push(t),C.hasOwnProperty(e)&&setTimeout(function(){C._trigger(e,C[e])},0)},S._trigger=function(e,t){if(this._l[e]){var n=this._l[e];setTimeout(function(){var e;for(e=0;e.md-nav__link{color:inherit}button[data-md-color-primary=pink]{background-color:#e91e63}[data-md-color-primary=pink] .md-typeset a{color:#e91e63}[data-md-color-primary=pink] .md-header,[data-md-color-primary=pink] .md-hero{background-color:#e91e63}[data-md-color-primary=pink] .md-nav__link--active,[data-md-color-primary=pink] .md-nav__link:active{color:#e91e63}[data-md-color-primary=pink] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=purple]{background-color:#ab47bc}[data-md-color-primary=purple] .md-typeset a{color:#ab47bc}[data-md-color-primary=purple] .md-header,[data-md-color-primary=purple] .md-hero{background-color:#ab47bc}[data-md-color-primary=purple] .md-nav__link--active,[data-md-color-primary=purple] .md-nav__link:active{color:#ab47bc}[data-md-color-primary=purple] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=deep-purple]{background-color:#7e57c2}[data-md-color-primary=deep-purple] .md-typeset a{color:#7e57c2}[data-md-color-primary=deep-purple] .md-header,[data-md-color-primary=deep-purple] .md-hero{background-color:#7e57c2}[data-md-color-primary=deep-purple] .md-nav__link--active,[data-md-color-primary=deep-purple] .md-nav__link:active{color:#7e57c2}[data-md-color-primary=deep-purple] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=indigo]{background-color:#3f51b5}[data-md-color-primary=indigo] .md-typeset a{color:#3f51b5}[data-md-color-primary=indigo] .md-header,[data-md-color-primary=indigo] .md-hero{background-color:#3f51b5}[data-md-color-primary=indigo] .md-nav__link--active,[data-md-color-primary=indigo] .md-nav__link:active{color:#3f51b5}[data-md-color-primary=indigo] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=blue]{background-color:#2196f3}[data-md-color-primary=blue] .md-typeset a{color:#2196f3}[data-md-color-primary=blue] .md-header,[data-md-color-primary=blue] .md-hero{background-color:#2196f3}[data-md-color-primary=blue] .md-nav__link--active,[data-md-color-primary=blue] .md-nav__link:active{color:#2196f3}[data-md-color-primary=blue] 
.md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=light-blue]{background-color:#03a9f4}[data-md-color-primary=light-blue] .md-typeset a{color:#03a9f4}[data-md-color-primary=light-blue] .md-header,[data-md-color-primary=light-blue] .md-hero{background-color:#03a9f4}[data-md-color-primary=light-blue] .md-nav__link--active,[data-md-color-primary=light-blue] .md-nav__link:active{color:#03a9f4}[data-md-color-primary=light-blue] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=cyan]{background-color:#00bcd4}[data-md-color-primary=cyan] .md-typeset a{color:#00bcd4}[data-md-color-primary=cyan] .md-header,[data-md-color-primary=cyan] .md-hero{background-color:#00bcd4}[data-md-color-primary=cyan] .md-nav__link--active,[data-md-color-primary=cyan] .md-nav__link:active{color:#00bcd4}[data-md-color-primary=cyan] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=teal]{background-color:#009688}[data-md-color-primary=teal] .md-typeset a{color:#009688}[data-md-color-primary=teal] .md-header,[data-md-color-primary=teal] .md-hero{background-color:#009688}[data-md-color-primary=teal] .md-nav__link--active,[data-md-color-primary=teal] .md-nav__link:active{color:#009688}[data-md-color-primary=teal] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=green]{background-color:#4caf50}[data-md-color-primary=green] .md-typeset a{color:#4caf50}[data-md-color-primary=green] .md-header,[data-md-color-primary=green] .md-hero{background-color:#4caf50}[data-md-color-primary=green] .md-nav__link--active,[data-md-color-primary=green] .md-nav__link:active{color:#4caf50}[data-md-color-primary=green] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=light-green]{background-color:#7cb342}[data-md-color-primary=light-green] .md-typeset a{color:#7cb342}[data-md-color-primary=light-green] .md-header,[data-md-color-primary=light-green] .md-hero{background-color:#7cb342}[data-md-color-primary=light-green] .md-nav__link--active,[data-md-color-primary=light-green] .md-nav__link:active{color:#7cb342}[data-md-color-primary=light-green] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=lime]{background-color:#c0ca33}[data-md-color-primary=lime] .md-typeset a{color:#c0ca33}[data-md-color-primary=lime] .md-header,[data-md-color-primary=lime] .md-hero{background-color:#c0ca33}[data-md-color-primary=lime] .md-nav__link--active,[data-md-color-primary=lime] .md-nav__link:active{color:#c0ca33}[data-md-color-primary=lime] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=yellow]{background-color:#f9a825}[data-md-color-primary=yellow] .md-typeset a{color:#f9a825}[data-md-color-primary=yellow] .md-header,[data-md-color-primary=yellow] .md-hero{background-color:#f9a825}[data-md-color-primary=yellow] .md-nav__link--active,[data-md-color-primary=yellow] .md-nav__link:active{color:#f9a825}[data-md-color-primary=yellow] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=amber]{background-color:#ffa000}[data-md-color-primary=amber] .md-typeset a{color:#ffa000}[data-md-color-primary=amber] .md-header,[data-md-color-primary=amber] .md-hero{background-color:#ffa000}[data-md-color-primary=amber] .md-nav__link--active,[data-md-color-primary=amber] .md-nav__link:active{color:#ffa000}[data-md-color-primary=amber] 
.md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=orange]{background-color:#fb8c00}[data-md-color-primary=orange] .md-typeset a{color:#fb8c00}[data-md-color-primary=orange] .md-header,[data-md-color-primary=orange] .md-hero{background-color:#fb8c00}[data-md-color-primary=orange] .md-nav__link--active,[data-md-color-primary=orange] .md-nav__link:active{color:#fb8c00}[data-md-color-primary=orange] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=deep-orange]{background-color:#ff7043}[data-md-color-primary=deep-orange] .md-typeset a{color:#ff7043}[data-md-color-primary=deep-orange] .md-header,[data-md-color-primary=deep-orange] .md-hero{background-color:#ff7043}[data-md-color-primary=deep-orange] .md-nav__link--active,[data-md-color-primary=deep-orange] .md-nav__link:active{color:#ff7043}[data-md-color-primary=deep-orange] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=brown]{background-color:#795548}[data-md-color-primary=brown] .md-typeset a{color:#795548}[data-md-color-primary=brown] .md-header,[data-md-color-primary=brown] .md-hero{background-color:#795548}[data-md-color-primary=brown] .md-nav__link--active,[data-md-color-primary=brown] .md-nav__link:active{color:#795548}[data-md-color-primary=brown] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=grey]{background-color:#757575}[data-md-color-primary=grey] .md-typeset a{color:#757575}[data-md-color-primary=grey] .md-header,[data-md-color-primary=grey] .md-hero{background-color:#757575}[data-md-color-primary=grey] .md-nav__link--active,[data-md-color-primary=grey] .md-nav__link:active{color:#757575}[data-md-color-primary=grey] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=blue-grey]{background-color:#546e7a}[data-md-color-primary=blue-grey] .md-typeset a{color:#546e7a}[data-md-color-primary=blue-grey] .md-header,[data-md-color-primary=blue-grey] .md-hero{background-color:#546e7a}[data-md-color-primary=blue-grey] .md-nav__link--active,[data-md-color-primary=blue-grey] .md-nav__link:active{color:#546e7a}[data-md-color-primary=blue-grey] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=white]{-webkit-box-shadow:0 0 .1rem rgba(0,0,0,.54) inset;box-shadow:inset 0 0 .1rem rgba(0,0,0,.54)}[data-md-color-primary=white] .md-header,[data-md-color-primary=white] .md-hero,button[data-md-color-primary=white]{background-color:#fff;color:rgba(0,0,0,.87)}[data-md-color-primary=white] .md-hero--expand{border-bottom:.1rem solid rgba(0,0,0,.07)}button[data-md-color-accent=red]{background-color:#ff1744}[data-md-color-accent=red] .md-typeset a:active,[data-md-color-accent=red] .md-typeset a:hover{color:#ff1744}[data-md-color-accent=red] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=red] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ff1744}[data-md-color-accent=red] .md-nav__link:focus,[data-md-color-accent=red] .md-nav__link:hover,[data-md-color-accent=red] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=red] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=red] .md-typeset .md-clipboard:active:before,[data-md-color-accent=red] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=red] .md-typeset [id] .headerlink:focus,[data-md-color-accent=red] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=red] .md-typeset [id]:target 
.headerlink{color:#ff1744}[data-md-color-accent=red] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff1744}[data-md-color-accent=red] .md-search-result__link:hover,[data-md-color-accent=red] .md-search-result__link[data-md-state=active]{background-color:rgba(255,23,68,.1)}[data-md-color-accent=red] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff1744}[data-md-color-accent=red] .md-source-file:hover:before{background-color:#ff1744}button[data-md-color-accent=pink]{background-color:#f50057}[data-md-color-accent=pink] .md-typeset a:active,[data-md-color-accent=pink] .md-typeset a:hover{color:#f50057}[data-md-color-accent=pink] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=pink] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#f50057}[data-md-color-accent=pink] .md-nav__link:focus,[data-md-color-accent=pink] .md-nav__link:hover,[data-md-color-accent=pink] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=pink] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=pink] .md-typeset .md-clipboard:active:before,[data-md-color-accent=pink] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=pink] .md-typeset [id] .headerlink:focus,[data-md-color-accent=pink] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=pink] .md-typeset [id]:target .headerlink{color:#f50057}[data-md-color-accent=pink] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#f50057}[data-md-color-accent=pink] .md-search-result__link:hover,[data-md-color-accent=pink] .md-search-result__link[data-md-state=active]{background-color:rgba(245,0,87,.1)}[data-md-color-accent=pink] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#f50057}[data-md-color-accent=pink] .md-source-file:hover:before{background-color:#f50057}button[data-md-color-accent=purple]{background-color:#e040fb}[data-md-color-accent=purple] .md-typeset a:active,[data-md-color-accent=purple] .md-typeset a:hover{color:#e040fb}[data-md-color-accent=purple] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=purple] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#e040fb}[data-md-color-accent=purple] .md-nav__link:focus,[data-md-color-accent=purple] .md-nav__link:hover,[data-md-color-accent=purple] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=purple] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=purple] .md-typeset .md-clipboard:active:before,[data-md-color-accent=purple] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=purple] .md-typeset [id] .headerlink:focus,[data-md-color-accent=purple] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=purple] .md-typeset [id]:target .headerlink{color:#e040fb}[data-md-color-accent=purple] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#e040fb}[data-md-color-accent=purple] .md-search-result__link:hover,[data-md-color-accent=purple] .md-search-result__link[data-md-state=active]{background-color:rgba(224,64,251,.1)}[data-md-color-accent=purple] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#e040fb}[data-md-color-accent=purple] .md-source-file:hover:before{background-color:#e040fb}button[data-md-color-accent=deep-purple]{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-typeset a:active,[data-md-color-accent=deep-purple] .md-typeset 
a:hover{color:#7c4dff}[data-md-color-accent=deep-purple] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=deep-purple] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-nav__link:focus,[data-md-color-accent=deep-purple] .md-nav__link:hover,[data-md-color-accent=deep-purple] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=deep-purple] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=deep-purple] .md-typeset .md-clipboard:active:before,[data-md-color-accent=deep-purple] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=deep-purple] .md-typeset [id] .headerlink:focus,[data-md-color-accent=deep-purple] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=deep-purple] .md-typeset [id]:target .headerlink{color:#7c4dff}[data-md-color-accent=deep-purple] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-search-result__link:hover,[data-md-color-accent=deep-purple] .md-search-result__link[data-md-state=active]{background-color:rgba(124,77,255,.1)}[data-md-color-accent=deep-purple] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-source-file:hover:before{background-color:#7c4dff}button[data-md-color-accent=indigo]{background-color:#536dfe}[data-md-color-accent=indigo] .md-typeset a:active,[data-md-color-accent=indigo] .md-typeset a:hover{color:#536dfe}[data-md-color-accent=indigo] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=indigo] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}[data-md-color-accent=indigo] .md-nav__link:focus,[data-md-color-accent=indigo] .md-nav__link:hover,[data-md-color-accent=indigo] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=indigo] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=indigo] .md-typeset .md-clipboard:active:before,[data-md-color-accent=indigo] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=indigo] .md-typeset [id] .headerlink:focus,[data-md-color-accent=indigo] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=indigo] .md-typeset [id]:target .headerlink{color:#536dfe}[data-md-color-accent=indigo] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}[data-md-color-accent=indigo] .md-search-result__link:hover,[data-md-color-accent=indigo] .md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}[data-md-color-accent=indigo] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}[data-md-color-accent=indigo] .md-source-file:hover:before{background-color:#536dfe}button[data-md-color-accent=blue]{background-color:#448aff}[data-md-color-accent=blue] .md-typeset a:active,[data-md-color-accent=blue] .md-typeset a:hover{color:#448aff}[data-md-color-accent=blue] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=blue] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#448aff}[data-md-color-accent=blue] .md-nav__link:focus,[data-md-color-accent=blue] .md-nav__link:hover,[data-md-color-accent=blue] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=blue] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=blue] .md-typeset 
.md-clipboard:active:before,[data-md-color-accent=blue] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=blue] .md-typeset [id] .headerlink:focus,[data-md-color-accent=blue] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=blue] .md-typeset [id]:target .headerlink{color:#448aff}[data-md-color-accent=blue] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#448aff}[data-md-color-accent=blue] .md-search-result__link:hover,[data-md-color-accent=blue] .md-search-result__link[data-md-state=active]{background-color:rgba(68,138,255,.1)}[data-md-color-accent=blue] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#448aff}[data-md-color-accent=blue] .md-source-file:hover:before{background-color:#448aff}button[data-md-color-accent=light-blue]{background-color:#0091ea}[data-md-color-accent=light-blue] .md-typeset a:active,[data-md-color-accent=light-blue] .md-typeset a:hover{color:#0091ea}[data-md-color-accent=light-blue] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=light-blue] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#0091ea}[data-md-color-accent=light-blue] .md-nav__link:focus,[data-md-color-accent=light-blue] .md-nav__link:hover,[data-md-color-accent=light-blue] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=light-blue] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=light-blue] .md-typeset .md-clipboard:active:before,[data-md-color-accent=light-blue] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=light-blue] .md-typeset [id] .headerlink:focus,[data-md-color-accent=light-blue] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=light-blue] .md-typeset [id]:target .headerlink{color:#0091ea}[data-md-color-accent=light-blue] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#0091ea}[data-md-color-accent=light-blue] .md-search-result__link:hover,[data-md-color-accent=light-blue] .md-search-result__link[data-md-state=active]{background-color:rgba(0,145,234,.1)}[data-md-color-accent=light-blue] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#0091ea}[data-md-color-accent=light-blue] .md-source-file:hover:before{background-color:#0091ea}button[data-md-color-accent=cyan]{background-color:#00b8d4}[data-md-color-accent=cyan] .md-typeset a:active,[data-md-color-accent=cyan] .md-typeset a:hover{color:#00b8d4}[data-md-color-accent=cyan] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=cyan] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#00b8d4}[data-md-color-accent=cyan] .md-nav__link:focus,[data-md-color-accent=cyan] .md-nav__link:hover,[data-md-color-accent=cyan] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=cyan] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=cyan] .md-typeset .md-clipboard:active:before,[data-md-color-accent=cyan] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=cyan] .md-typeset [id] .headerlink:focus,[data-md-color-accent=cyan] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=cyan] .md-typeset [id]:target .headerlink{color:#00b8d4}[data-md-color-accent=cyan] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00b8d4}[data-md-color-accent=cyan] .md-search-result__link:hover,[data-md-color-accent=cyan] 
.md-search-result__link[data-md-state=active]{background-color:rgba(0,184,212,.1)}[data-md-color-accent=cyan] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00b8d4}[data-md-color-accent=cyan] .md-source-file:hover:before{background-color:#00b8d4}button[data-md-color-accent=teal]{background-color:#00bfa5}[data-md-color-accent=teal] .md-typeset a:active,[data-md-color-accent=teal] .md-typeset a:hover{color:#00bfa5}[data-md-color-accent=teal] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=teal] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#00bfa5}[data-md-color-accent=teal] .md-nav__link:focus,[data-md-color-accent=teal] .md-nav__link:hover,[data-md-color-accent=teal] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=teal] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=teal] .md-typeset .md-clipboard:active:before,[data-md-color-accent=teal] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=teal] .md-typeset [id] .headerlink:focus,[data-md-color-accent=teal] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=teal] .md-typeset [id]:target .headerlink{color:#00bfa5}[data-md-color-accent=teal] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00bfa5}[data-md-color-accent=teal] .md-search-result__link:hover,[data-md-color-accent=teal] .md-search-result__link[data-md-state=active]{background-color:rgba(0,191,165,.1)}[data-md-color-accent=teal] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00bfa5}[data-md-color-accent=teal] .md-source-file:hover:before{background-color:#00bfa5}button[data-md-color-accent=green]{background-color:#00c853}[data-md-color-accent=green] .md-typeset a:active,[data-md-color-accent=green] .md-typeset a:hover{color:#00c853}[data-md-color-accent=green] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=green] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#00c853}[data-md-color-accent=green] .md-nav__link:focus,[data-md-color-accent=green] .md-nav__link:hover,[data-md-color-accent=green] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=green] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=green] .md-typeset .md-clipboard:active:before,[data-md-color-accent=green] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=green] .md-typeset [id] .headerlink:focus,[data-md-color-accent=green] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=green] .md-typeset [id]:target .headerlink{color:#00c853}[data-md-color-accent=green] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00c853}[data-md-color-accent=green] .md-search-result__link:hover,[data-md-color-accent=green] .md-search-result__link[data-md-state=active]{background-color:rgba(0,200,83,.1)}[data-md-color-accent=green] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00c853}[data-md-color-accent=green] .md-source-file:hover:before{background-color:#00c853}button[data-md-color-accent=light-green]{background-color:#64dd17}[data-md-color-accent=light-green] .md-typeset a:active,[data-md-color-accent=light-green] .md-typeset a:hover{color:#64dd17}[data-md-color-accent=light-green] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=light-green] .md-typeset pre 
code::-webkit-scrollbar-thumb:hover{background-color:#64dd17}[data-md-color-accent=light-green] .md-nav__link:focus,[data-md-color-accent=light-green] .md-nav__link:hover,[data-md-color-accent=light-green] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=light-green] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=light-green] .md-typeset .md-clipboard:active:before,[data-md-color-accent=light-green] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=light-green] .md-typeset [id] .headerlink:focus,[data-md-color-accent=light-green] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=light-green] .md-typeset [id]:target .headerlink{color:#64dd17}[data-md-color-accent=light-green] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#64dd17}[data-md-color-accent=light-green] .md-search-result__link:hover,[data-md-color-accent=light-green] .md-search-result__link[data-md-state=active]{background-color:rgba(100,221,23,.1)}[data-md-color-accent=light-green] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#64dd17}[data-md-color-accent=light-green] .md-source-file:hover:before{background-color:#64dd17}button[data-md-color-accent=lime]{background-color:#aeea00}[data-md-color-accent=lime] .md-typeset a:active,[data-md-color-accent=lime] .md-typeset a:hover{color:#aeea00}[data-md-color-accent=lime] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=lime] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#aeea00}[data-md-color-accent=lime] .md-nav__link:focus,[data-md-color-accent=lime] .md-nav__link:hover,[data-md-color-accent=lime] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=lime] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=lime] .md-typeset .md-clipboard:active:before,[data-md-color-accent=lime] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=lime] .md-typeset [id] .headerlink:focus,[data-md-color-accent=lime] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=lime] .md-typeset [id]:target .headerlink{color:#aeea00}[data-md-color-accent=lime] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#aeea00}[data-md-color-accent=lime] .md-search-result__link:hover,[data-md-color-accent=lime] .md-search-result__link[data-md-state=active]{background-color:rgba(174,234,0,.1)}[data-md-color-accent=lime] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#aeea00}[data-md-color-accent=lime] .md-source-file:hover:before{background-color:#aeea00}button[data-md-color-accent=yellow]{background-color:#ffd600}[data-md-color-accent=yellow] .md-typeset a:active,[data-md-color-accent=yellow] .md-typeset a:hover{color:#ffd600}[data-md-color-accent=yellow] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=yellow] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ffd600}[data-md-color-accent=yellow] .md-nav__link:focus,[data-md-color-accent=yellow] .md-nav__link:hover,[data-md-color-accent=yellow] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=yellow] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=yellow] .md-typeset .md-clipboard:active:before,[data-md-color-accent=yellow] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=yellow] .md-typeset [id] .headerlink:focus,[data-md-color-accent=yellow] .md-typeset [id]:hover 
.headerlink:hover,[data-md-color-accent=yellow] .md-typeset [id]:target .headerlink{color:#ffd600}[data-md-color-accent=yellow] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffd600}[data-md-color-accent=yellow] .md-search-result__link:hover,[data-md-color-accent=yellow] .md-search-result__link[data-md-state=active]{background-color:rgba(255,214,0,.1)}[data-md-color-accent=yellow] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffd600}[data-md-color-accent=yellow] .md-source-file:hover:before{background-color:#ffd600}button[data-md-color-accent=amber]{background-color:#ffab00}[data-md-color-accent=amber] .md-typeset a:active,[data-md-color-accent=amber] .md-typeset a:hover{color:#ffab00}[data-md-color-accent=amber] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=amber] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ffab00}[data-md-color-accent=amber] .md-nav__link:focus,[data-md-color-accent=amber] .md-nav__link:hover,[data-md-color-accent=amber] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=amber] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=amber] .md-typeset .md-clipboard:active:before,[data-md-color-accent=amber] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=amber] .md-typeset [id] .headerlink:focus,[data-md-color-accent=amber] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=amber] .md-typeset [id]:target .headerlink{color:#ffab00}[data-md-color-accent=amber] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffab00}[data-md-color-accent=amber] .md-search-result__link:hover,[data-md-color-accent=amber] .md-search-result__link[data-md-state=active]{background-color:rgba(255,171,0,.1)}[data-md-color-accent=amber] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffab00}[data-md-color-accent=amber] .md-source-file:hover:before{background-color:#ffab00}button[data-md-color-accent=orange]{background-color:#ff9100}[data-md-color-accent=orange] .md-typeset a:active,[data-md-color-accent=orange] .md-typeset a:hover{color:#ff9100}[data-md-color-accent=orange] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=orange] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ff9100}[data-md-color-accent=orange] .md-nav__link:focus,[data-md-color-accent=orange] .md-nav__link:hover,[data-md-color-accent=orange] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=orange] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=orange] .md-typeset .md-clipboard:active:before,[data-md-color-accent=orange] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=orange] .md-typeset [id] .headerlink:focus,[data-md-color-accent=orange] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=orange] .md-typeset [id]:target .headerlink{color:#ff9100}[data-md-color-accent=orange] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff9100}[data-md-color-accent=orange] .md-search-result__link:hover,[data-md-color-accent=orange] .md-search-result__link[data-md-state=active]{background-color:rgba(255,145,0,.1)}[data-md-color-accent=orange] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff9100}[data-md-color-accent=orange] 
.md-source-file:hover:before{background-color:#ff9100}button[data-md-color-accent=deep-orange]{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-typeset a:active,[data-md-color-accent=deep-orange] .md-typeset a:hover{color:#ff6e40}[data-md-color-accent=deep-orange] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=deep-orange] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-nav__link:focus,[data-md-color-accent=deep-orange] .md-nav__link:hover,[data-md-color-accent=deep-orange] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=deep-orange] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=deep-orange] .md-typeset .md-clipboard:active:before,[data-md-color-accent=deep-orange] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=deep-orange] .md-typeset [id] .headerlink:focus,[data-md-color-accent=deep-orange] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=deep-orange] .md-typeset [id]:target .headerlink{color:#ff6e40}[data-md-color-accent=deep-orange] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-search-result__link:hover,[data-md-color-accent=deep-orange] .md-search-result__link[data-md-state=active]{background-color:rgba(255,110,64,.1)}[data-md-color-accent=deep-orange] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-source-file:hover:before{background-color:#ff6e40}@media only screen and (max-width:59.9375em){[data-md-color-primary=red] .md-nav__source{background-color:rgba(190,66,64,.9675)}[data-md-color-primary=pink] .md-nav__source{background-color:rgba(185,24,79,.9675)}[data-md-color-primary=purple] .md-nav__source{background-color:rgba(136,57,150,.9675)}[data-md-color-primary=deep-purple] .md-nav__source{background-color:rgba(100,69,154,.9675)}[data-md-color-primary=indigo] .md-nav__source{background-color:rgba(50,64,144,.9675)}[data-md-color-primary=blue] .md-nav__source{background-color:rgba(26,119,193,.9675)}[data-md-color-primary=light-blue] .md-nav__source{background-color:rgba(2,134,194,.9675)}[data-md-color-primary=cyan] .md-nav__source{background-color:rgba(0,150,169,.9675)}[data-md-color-primary=teal] .md-nav__source{background-color:rgba(0,119,108,.9675)}[data-md-color-primary=green] .md-nav__source{background-color:rgba(60,139,64,.9675)}[data-md-color-primary=light-green] .md-nav__source{background-color:rgba(99,142,53,.9675)}[data-md-color-primary=lime] .md-nav__source{background-color:rgba(153,161,41,.9675)}[data-md-color-primary=yellow] .md-nav__source{background-color:rgba(198,134,29,.9675)}[data-md-color-primary=amber] .md-nav__source{background-color:rgba(203,127,0,.9675)}[data-md-color-primary=orange] .md-nav__source{background-color:rgba(200,111,0,.9675)}[data-md-color-primary=deep-orange] .md-nav__source{background-color:rgba(203,89,53,.9675)}[data-md-color-primary=brown] .md-nav__source{background-color:rgba(96,68,57,.9675)}[data-md-color-primary=grey] .md-nav__source{background-color:rgba(93,93,93,.9675)}[data-md-color-primary=blue-grey] .md-nav__source{background-color:rgba(67,88,97,.9675)}[data-md-color-primary=white] .md-nav__source{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.87)}}@media only screen and (max-width:76.1875em){html [data-md-color-primary=red] .md-nav--primary .md-nav__title--site{background-color:#ef5350}html 
[data-md-color-primary=pink] .md-nav--primary .md-nav__title--site{background-color:#e91e63}html [data-md-color-primary=purple] .md-nav--primary .md-nav__title--site{background-color:#ab47bc}html [data-md-color-primary=deep-purple] .md-nav--primary .md-nav__title--site{background-color:#7e57c2}html [data-md-color-primary=indigo] .md-nav--primary .md-nav__title--site{background-color:#3f51b5}html [data-md-color-primary=blue] .md-nav--primary .md-nav__title--site{background-color:#2196f3}html [data-md-color-primary=light-blue] .md-nav--primary .md-nav__title--site{background-color:#03a9f4}html [data-md-color-primary=cyan] .md-nav--primary .md-nav__title--site{background-color:#00bcd4}html [data-md-color-primary=teal] .md-nav--primary .md-nav__title--site{background-color:#009688}html [data-md-color-primary=green] .md-nav--primary .md-nav__title--site{background-color:#4caf50}html [data-md-color-primary=light-green] .md-nav--primary .md-nav__title--site{background-color:#7cb342}html [data-md-color-primary=lime] .md-nav--primary .md-nav__title--site{background-color:#c0ca33}html [data-md-color-primary=yellow] .md-nav--primary .md-nav__title--site{background-color:#f9a825}html [data-md-color-primary=amber] .md-nav--primary .md-nav__title--site{background-color:#ffa000}html [data-md-color-primary=orange] .md-nav--primary .md-nav__title--site{background-color:#fb8c00}html [data-md-color-primary=deep-orange] .md-nav--primary .md-nav__title--site{background-color:#ff7043}html [data-md-color-primary=brown] .md-nav--primary .md-nav__title--site{background-color:#795548}html [data-md-color-primary=grey] .md-nav--primary .md-nav__title--site{background-color:#757575}html [data-md-color-primary=blue-grey] .md-nav--primary .md-nav__title--site{background-color:#546e7a}html [data-md-color-primary=white] .md-nav--primary .md-nav__title--site{background-color:#fff;color:rgba(0,0,0,.87)}[data-md-color-primary=white] .md-hero{border-bottom:.1rem solid rgba(0,0,0,.07)}}@media only screen and (min-width:76.25em){[data-md-color-primary=red] .md-tabs{background-color:#ef5350}[data-md-color-primary=pink] .md-tabs{background-color:#e91e63}[data-md-color-primary=purple] .md-tabs{background-color:#ab47bc}[data-md-color-primary=deep-purple] .md-tabs{background-color:#7e57c2}[data-md-color-primary=indigo] .md-tabs{background-color:#3f51b5}[data-md-color-primary=blue] .md-tabs{background-color:#2196f3}[data-md-color-primary=light-blue] .md-tabs{background-color:#03a9f4}[data-md-color-primary=cyan] .md-tabs{background-color:#00bcd4}[data-md-color-primary=teal] .md-tabs{background-color:#009688}[data-md-color-primary=green] .md-tabs{background-color:#4caf50}[data-md-color-primary=light-green] .md-tabs{background-color:#7cb342}[data-md-color-primary=lime] .md-tabs{background-color:#c0ca33}[data-md-color-primary=yellow] .md-tabs{background-color:#f9a825}[data-md-color-primary=amber] .md-tabs{background-color:#ffa000}[data-md-color-primary=orange] .md-tabs{background-color:#fb8c00}[data-md-color-primary=deep-orange] .md-tabs{background-color:#ff7043}[data-md-color-primary=brown] .md-tabs{background-color:#795548}[data-md-color-primary=grey] .md-tabs{background-color:#757575}[data-md-color-primary=blue-grey] .md-tabs{background-color:#546e7a}[data-md-color-primary=white] .md-tabs{border-bottom:.1rem solid rgba(0,0,0,.07);background-color:#fff;color:rgba(0,0,0,.87)}}@media only screen and (min-width:60em){[data-md-color-primary=white] .md-search__input{background-color:rgba(0,0,0,.07)}[data-md-color-primary=white] 
.md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input:-ms-input-placeholder,[data-md-color-primary=white] .md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input::placeholder{color:rgba(0,0,0,.54)}} +/*# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IiIsImZpbGUiOiJhc3NldHMvc3R5bGVzaGVldHMvYXBwbGljYXRpb24tcGFsZXR0ZS43OTI0MzFjMS5jc3MiLCJzb3VyY2VSb290IjoiIn0=*/ \ No newline at end of file diff --git a/docs/assets/stylesheets/application.0e9c8aca.css b/docs/assets/stylesheets/application.0e9c8aca.css new file mode 100644 index 000000000..e485b78df --- /dev/null +++ b/docs/assets/stylesheets/application.0e9c8aca.css @@ -0,0 +1,2 @@ +html{-webkit-box-sizing:border-box;box-sizing:border-box}*,:after,:before{-webkit-box-sizing:inherit;box-sizing:inherit}html{-webkit-text-size-adjust:none;-moz-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}body{margin:0}hr{overflow:visible;-webkit-box-sizing:content-box;box-sizing:content-box}a{-webkit-text-decoration-skip:objects}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}a:active,a:hover{outline-width:0}small,sub,sup{font-size:80%}sub,sup{position:relative;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}table{border-collapse:collapse;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{padding:0;background:transparent;font-size:inherit}button,input{border:0;outline:0}.md-clipboard:before,.md-icon,.md-nav__button,.md-nav__link:after,.md-nav__title:before,.md-search-result__article--document:before,.md-source-file:before,.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before,.md-typeset summary:after{font-family:Material Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;speak:none;word-wrap:normal;direction:ltr}.md-content__icon,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before,.md-search-result__article--document:before{display:inline-block;margin:.4rem;padding:.8rem;font-size:2.4rem;cursor:pointer}.md-icon--arrow-back:before{content:"\E5C4"}.md-icon--arrow-forward:before{content:"\E5C8"}.md-icon--menu:before{content:"\E5D2"}.md-icon--search:before{content:"\E8B6"}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","liga";font-feature-settings:"kern","liga";font-family:Helvetica Neue,Helvetica,Arial,sans-serif}code,kbd,pre{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern";font-feature-settings:"kern";font-family:Courier New,Courier,monospace}.md-typeset{font-size:1.6rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 4rem;color:rgba(0,0,0,.54);font-size:3.125rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:4rem 0 1.6rem;font-size:2.5rem;line-height:1.4}.md-typeset h3{margin:3.2rem 0 
1.6rem;font-size:2rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:1.6rem}.md-typeset h4{font-size:1.6rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:1.6rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.1rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,93%,.5);color:#37474f;font-size:85%}.md-typeset code{margin:0 .29412em;padding:.07353em 0;border-radius:.2rem;-webkit-box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:none;background-color:inherit;color:inherit;-webkit-box-shadow:none;box-shadow:none}.md-typeset pre{position:relative;margin:1em 0;border-radius:.2rem;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset pre>code{display:block;margin:0;padding:1.05rem 1.2rem;background-color:transparent;font-size:inherit;-webkit-box-shadow:none;box-shadow:none;-webkit-box-decoration-break:none;box-decoration-break:none;overflow:auto}.md-typeset pre>code::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset kbd{padding:0 .29412em;border:.1rem solid #c9c9c9;border-radius:.2rem;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;-webkit-box-shadow:0 .1rem 0 #b0b0b0;box-shadow:0 .1rem 0 #b0b0b0;word-break:break-word}.md-typeset mark{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;background-color:rgba(255,235,59,.5);-webkit-box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.1rem dotted rgba(0,0,0,.54);text-decoration:none;cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}.md-typeset blockquote{padding-left:1.2rem;border-left:.4rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}.md-typeset dd{margin:1em 0 1em 1.875em}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset 
table:not([class]){-webkit-box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);display:inline-block;max-width:100%;border-radius:.2rem;font-size:1.28rem;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}.md-typeset table:not([class]) th{min-width:10rem;padding:1.2rem 1.6rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:1.2rem 1.6rem;border-top:.1rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset__scrollwrap{margin:1em -1.6rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 1.6rem}.md-typeset .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{font-size:62.5%;overflow-x:hidden}body,html{height:100%}body{position:relative}hr{display:block;height:.1rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:122rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;padding-top:4.8rem;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{height:100%;padding-top:3rem;padding-bottom:.1rem}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:3}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}@page{margin:25mm}.md-clipboard{position:absolute;top:.6rem;right:.6rem;width:2.8rem;height:2.8rem;border-radius:.2rem;font-size:1.6rem;cursor:pointer;z-index:1;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-clipboard:before{-webkit-transition:color .25s,opacity .25s;transition:color .25s,opacity .25s;color:rgba(0,0,0,.54);content:"\E14D";opacity:.25}.codehilite:hover .md-clipboard:before,.md-typeset .highlight:hover .md-clipboard:before,pre:hover .md-clipboard:before{opacity:1}.md-clipboard:active:before,.md-clipboard:hover:before{color:#536dfe}.md-clipboard__message{display:block;position:absolute;top:0;right:3.4rem;padding:.6rem 1rem;-webkit-transform:translateX(.8rem);transform:translateX(.8rem);-webkit-transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s;transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);border-radius:.2rem;background-color:rgba(0,0,0,.54);color:#fff;font-size:1.28rem;white-space:nowrap;opacity:0;pointer-events:none}.md-clipboard__message--active{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s 
cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;pointer-events:auto}.md-clipboard__message:before{content:attr(aria-label)}.md-clipboard__message:after{display:block;position:absolute;top:50%;right:-.4rem;width:0;margin-top:-.4rem;border-width:.4rem 0 .4rem .4rem;border-style:solid;border-color:transparent rgba(0,0,0,.54);content:""}.md-content__inner{margin:0 1.6rem 2.4rem;padding-top:1.2rem}.md-content__inner:before{display:block;height:.8rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__icon{position:relative;margin:.8rem 0;padding:0;float:right}.md-typeset .md-content__icon{color:rgba(0,0,0,.26)}.md-header{position:fixed;top:0;right:0;left:0;height:4.8rem;-webkit-transition:background-color .25s,color .25s;transition:background-color .25s,color .25s;background-color:#3f51b5;color:#fff;z-index:2;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-header,.no-js .md-header{-webkit-box-shadow:none;box-shadow:none}.no-js .md-header{-webkit-transition:none;transition:none}.md-header[data-md-state=shadow]{-webkit-transition:background-color .25s,color .25s,-webkit-box-shadow .25s;transition:background-color .25s,color .25s,-webkit-box-shadow .25s;transition:background-color .25s,color .25s,box-shadow .25s;transition:background-color .25s,color .25s,box-shadow .25s,-webkit-box-shadow .25s;-webkit-box-shadow:0 0 .4rem rgba(0,0,0,.1),0 .4rem .8rem rgba(0,0,0,.2);box-shadow:0 0 .4rem rgba(0,0,0,.1),0 .4rem .8rem rgba(0,0,0,.2)}.md-header-nav{padding:0 .4rem}.md-header-nav__button{position:relative;-webkit-transition:opacity .25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo *{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__topic{display:block;position:absolute;-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(2.5rem);transform:translateX(2.5rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}.no-js .md-header-nav__topic{position:static}.no-js .md-header-nav__topic+.md-header-nav__topic{display:none}.md-header-nav__title{padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-header-nav__title[data-md-state=active] .md-header-nav__topic{-webkit-transform:translateX(-2.5rem);transform:translateX(-2.5rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s 
cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}.md-header-nav__title[data-md-state=active] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);opacity:1;z-index:0;pointer-events:auto}.md-header-nav__source{display:none}.md-hero{-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;font-size:2rem;overflow:hidden}.md-hero__inner{margin-top:2rem;padding:1.6rem 1.6rem .8rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);-webkit-transition-delay:.1s;transition-delay:.1s}[data-md-state=hidden] .md-hero__inner{pointer-events:none;-webkit-transform:translateY(1.25rem);transform:translateY(1.25rem);-webkit-transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:transform 0s .4s,opacity .1s 0s;transition:transform 0s .4s,opacity .1s 0s,-webkit-transform 0s .4s;opacity:0}.md-hero--expand .md-hero__inner{margin-bottom:2.4rem}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.4rem;overflow:auto}.md-footer-nav__link{padding-top:2.8rem;padding-bottom:.8rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}.md-footer-nav__link--next{width:75%;float:right;text-align:right}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-2rem;padding:0 2rem;color:hsla(0,0%,100%,.7);font-size:1.5rem}.md-footer-meta{background-color:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.4rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:#fff}.md-footer-copyright{margin:0 1.2rem;padding:.8rem 0;color:hsla(0,0%,100%,.3);font-size:1.28rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .8rem;padding:.4rem 0 1.2rem}.md-footer-social__link{display:inline-block;width:3.2rem;height:3.2rem;font-size:1.6rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:1.4rem;line-height:1.3}.md-nav--secondary .md-nav__link--active{color:#3f51b5}.md-nav__title{display:block;padding:0 1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"\E5C4"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 1.2rem}.md-nav__item:last-child{padding-bottom:1.2rem}.md-nav__item .md-nav__item{padding-right:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;margin-top:.625em;-webkit-transition:color .125s;transition:color 
.125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"\E313"}html .md-nav__link[for=toc],html .md-nav__link[for=toc]+.md-nav__link:after,html .md-nav__link[for=toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link:active{color:#3f51b5}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 4.8rem 0 7.2rem;text-overflow:ellipsis;z-index:2}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-webkit-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;font-size:2.4rem;cursor:pointer;z-index:2}.md-search__icon:hover{opacity:.7}.md-search__icon[for=search]{top:.6rem;left:1rem}.md-search__icon[for=search]:before{content:"\E8B6"}.md-search__icon[type=reset]{top:.6rem;right:1rem;-webkit-transform:scale(.125);transform:scale(.125);-webkit-transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]{-webkit-transform:scale(1);transform:scale(1);opacity:1}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;width:100%;border-radius:0 0 .2rem .2rem;overflow:hidden;z-index:1}.md-search__scrollwrap{height:100%;background-color:#fff;-webkit-box-shadow:0 .1rem 0 rgba(0,0,0,.07) inset;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result{color:rgba(0,0,0,.87);word-break:break-word}.md-search-result__meta{padding:0 1.6rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:1.28rem;line-height:3.6rem}.md-search-result__list{margin:0;padding:0;border-top:.1rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{-webkit-box-shadow:0 -.1rem 0 rgba(0,0,0,.07);box-shadow:0 -.1rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;-webkit-transition:background .25s;transition:background .25s;outline:0;overflow:hidden}.md-search-result__link:hover,.md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}.md-search-result__link:hover 
.md-search-result__article:before,.md-search-result__link[data-md-state=active] .md-search-result__article:before{opacity:.7}.md-search-result__link:last-child .md-search-result__teaser{margin-bottom:1.2rem}.md-search-result__article{position:relative;padding:0 1.6rem;overflow:auto}.md-search-result__article--document:before{position:absolute;left:0;margin:.2rem;-webkit-transition:opacity .25s;transition:opacity .25s;color:rgba(0,0,0,.54);content:"\E880"}.md-search-result__article--document .md-search-result__title{margin:1.1rem 0;font-size:1.6rem;font-weight:400;line-height:1.4}.md-search-result__title{margin:.5em 0;font-size:1.28rem;font-weight:700;line-height:1.4}.md-search-result__teaser{display:-webkit-box;max-height:3.3rem;margin:.5em 0;color:rgba(0,0,0,.54);font-size:1.28rem;line-height:1.4;text-overflow:ellipsis;overflow:hidden;-webkit-box-orient:vertical;-webkit-line-clamp:2}.md-search-result em{font-style:normal;font-weight:700;text-decoration:underline}.md-sidebar{position:absolute;width:24.2rem;padding:2.4rem 0;overflow:hidden}.md-sidebar[data-md-state=lock]{position:fixed;top:4.8rem}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .4rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:1.3rem}}@keyframes md-source__facts--done{0%{height:0}to{height:1.3rem}}@-webkit-keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;padding-right:1.2rem;-webkit-transition:opacity .25s;transition:opacity .25s;font-size:1.3rem;line-height:1.2;white-space:nowrap}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:4.8rem;content:"";vertical-align:middle}.md-source__icon{width:4.8rem}.md-source__icon svg{width:2.4rem;height:2.4rem;margin-top:1.2rem;margin-left:1.2rem}.md-source__icon+.md-source__repository{margin-left:-4.4rem;padding-left:4rem}.md-source__repository{display:inline-block;max-width:100%;margin-left:1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:1.1rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{float:left}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{margin:0 .2rem;content:"\B7"}.md-source__fact:first-child:before{display:none}.md-source-file{display:inline-block;margin:1em .5em 1em 
[remainder of minified theme stylesheet assets/stylesheets/application.0e9c8aca.css omitted]
\ No newline at end of file
diff --git a/docs/guides/one_off_task/index.html b/docs/guides/one_off_task/index.html
index 4194b717c..6360c74b9 100644
--- a/docs/guides/one_off_task/index.html
+++ b/docs/guides/one_off_task/index.html
@@ -1,16 +1,16 @@
[regenerated page head and theme markup omitted; page title: One-off Task - Layer0]
Deployment guide: Guestbook one-off task#

In this example, you will learn how to use Layer0 to run a one-off task. A task runs a single instance of your Task Definition and is typically a short-running job that stops once it has finished.


    Before you start#

    @@ -485,8 +710,9 @@

Part 2: Create a deploy#

Next, you will create a new deploy for the task using the deploy create command. At the command prompt, run the following command:

    l0 deploy create Dockerrun.aws.json one-off-task-dpl

    You will see the following output:

DEPLOY ID           DEPLOY NAME        VERSION
one-off-task-dpl.1  one-off-task-dpl   1

    Part 3: Create the task#

    @@ -494,8 +720,9 @@

To run the task, use the following command:

    l0 task create demo-env echo-tsk one-off-task-dpl:latest --wait

    You will see the following output:

TASK ID       TASK NAME         ENVIRONMENT  DEPLOY              SCALE
one-off851c9  echo-tsk          demo-env     one-off-task-dpl:1  0/1 (1)

The SCALE column shows the running, desired, and pending counts. A value of 0/1 (1) indicates that the running count is 0, the desired count is 1, and the (1) denotes one pending task that is about to transition to the running state. After your task has finished running, the desired count remains 1 and the pending value is no longer shown, so a finished task shows 0/1.

    @@ -503,15 +730,16 @@

Part 4: Check the status of the task#

To view the logs for this task and evaluate its progress, you can use the task logs command:

l0 task logs echo-tsk

    You will see the following output:

alpine
------
Task finished!

You can also use the following command to get more information about the task.

    l0 -o json task get echo-tsk

    Outputs:

[
    {
        "copies": [
            {
                ...
            }
        ],
        "task_id": "echotsk1facd",
        "task_name": "echo-tsk"
    }
]

    After the task has finished, running l0 -o json task get echo-tsk will show a pending_count of 0.

    Outputs:

    -
    ...
\ No newline at end of file
diff --git a/docs/guides/terraform_beyond_layer0/index.html b/docs/guides/terraform_beyond_layer0/index.html
deleted file mode 100644
index a77facb7b..000000000
--- a/docs/guides/terraform_beyond_layer0/index.html
+++ /dev/null
@@ -1,985 +0,0 @@
[deleted page head and theme markup omitted; page title: Terraform beyond Layer0 - Layer0]

    Deployment guide: Terraform beyond Layer0#

    -

In this example, you will learn how to use Terraform to create a Layer0 service as well as a persistent data store. The main goal of this example is to explore how you can combine Layer0 with other Terraform providers and follow Terraform best practices.

    -

    Before you start#

    -

    To complete the procedures in this section, you must have the following installed and configured correctly:

    -
      -
    • Layer0 v0.8.4 or later
    • -
    • Terraform v0.9.0 or later
    • -
    • Layer0 Terraform Provider
    • -
    -

    If you have not already configured Layer0, see the Layer0 installation guide. If you are running an older version of Layer0, see the Layer0 upgrade instructions.

    -

    See the Terraform installation guide to install Terraform and the Layer0 Terraform Plugin.

    -
    -

    Deploy with Terraform#

    -

Using Terraform, you will deploy a simple guestbook application backed by an AWS DynamoDB table. The Terraform configuration file uses both the Layer0 and AWS Terraform providers to deploy the guestbook application and provision a new DynamoDB table.

    -
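For orientation, here is a hedged sketch of how the two providers might be declared in such a configuration. The layer0 provider arguments endpoint and token mirror the variables this guide prompts for later; the exact variable names used for the AWS credentials are assumptions:

# Sketch: the two providers this guide's configuration relies on
provider "aws" {
  access_key = "${var.access_key}"
  secret_key = "${var.secret_key}"
  region     = "${var.region}"
}

# Layer0 provider pointed at your Layer0 API (prompted for as var.endpoint / var.token)
provider "layer0" {
  endpoint = "${var.endpoint}"
  token    = "${var.token}"
}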

    Part 1: Clone the guides repository#

    -

    Run this command to clone the quintilesims/guides repository:

    -

    git clone https://github.com/quintilesims/guides.git

    -

    Once you have cloned the repository, navigate to the guides/terraform-beyond-layer0/example-1 folder for the rest of this example.

    -

    Part 2: Terraform Plan#

    -
    -

    Note

    -

As we're using modules in our Terraform configuration, we need to run the terraform get command before performing other Terraform operations. Running terraform get downloads the modules into a local folder named .terraform. See here for more information on terraform get.

    -

    terraform get

    -

    Get: file:///Users//go/src/github.com/quintilesims/guides/terraform-beyond-layer0/example-1/modules/guestbook_service

    -
    -

Before deploying, we can run the following command to see what changes Terraform will make to your infrastructure should you go ahead and apply. If there are any errors in your layer0.tf file, running terraform plan will output those errors so that you can address them. Terraform will also prompt you for any configuration values it does not have.

    -
    -

    Tip

    -

    There are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the terraform.tfvars file, or exporting environment variables like TF_VAR_endpoint and TF_VAR_token, for example). See the Terraform Docs for more.

    -
    -
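For instance, a minimal terraform.tfvars sketch with placeholder values (keep real credentials out of version control); the variable names match the prompts shown below:

# terraform.tfvars (placeholders only)
endpoint = "<your Layer0 endpoint>"
token    = "<your Layer0 token>"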

    terraform plan

    -
    var.endpoint
    -  Enter a value: <enter your Layer0 endpoint>
    -
    -var.token
    -  Enter a value: <enter your Layer0 token>
    -...
-+ aws_dynamodb_table.guestbook
    -    arn:                       "<computed>"
    -    attribute.#:               "1"
    -    attribute.4228504427.name: "id"
    -    attribute.4228504427.type: "S"
    -    hash_key:                  "id"
    -    name:                      "guestbook"
    -    read_capacity:             "20"
    -    stream_arn:                "<computed>"
    -    stream_enabled:            "<computed>"
    -    stream_view_type:          "<computed>"
    -    write_capacity:            "20"
    -
    -...
    - - -

    Part 3: Terraform Apply#

    -

    Run the following command to begin the deploy process.

    -

    terraform apply

    -
    layer0_environment.demo: Refreshing state...
    -...
    -...
    -...
    -layer0_service.guestbook: Creation complete
    -
    -Apply complete! Resources: 7 added, 0 changed, 0 destroyed.
    -
    -The state of your infrastructure has been saved to the path
    -below. This state is required to modify and destroy your
    -infrastructure, so keep it safe. To inspect the complete state
    -use the `terraform show` command.
    -
    -State path: terraform.tfstate
    -
    -Outputs:
    -
    -guestbook_url = <http endpoint for the sample application>
    - - -
    -

    Note

    -

    It may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time, you may get HTTP 503 errors when making HTTP requests against the load balancer URL.

    -
    -

    Terraform will set up the entire environment for you and then output a link to the application's load balancer.

    -

    What's happening#

    -

Terraform, using the AWS provider, provisions a new DynamoDB table. It also uses the Layer0 provider to provision the environment, deploy, load balancer, and service required to run the entire guestbook application.

    -

    Looking at an excerpt of the file ./terraform-beyond-layer0/example-1/modules/guestbook_service/main.tf, we can see the following definitions:

    -
    resource "aws_dynamodb_table" "guestbook" {
    -  name           = "${var.table_name}"
    -  read_capacity  = 20
    -  write_capacity = 20
    -  hash_key       = "id"
    -
    -  attribute {
    -    name = "id"
    -    type = "S"
    -  }
    -}
    -
    -resource "layer0_deploy" "guestbook" {
    -  name    = "guestbook"
    -  content = "${data.template_file.guestbook.rendered}"
    -}
    -
    -data "template_file" "guestbook" {
    -  template = "${file("Dockerrun.aws.json")}"
    -
    -  vars {
    -    access_key = "${var.access_key}"
    -    secret_key = "${var.secret_key}"
    -    region     = "${var.region}"
    -    table_name = "${aws_dynamodb_table.guestbook.name}"
    -  }
    -}
    - - -

    Note the resource definitions for aws_dynamodb_table and layer0_deploy. To configure the guestbook application to use the provisioned DynamoDB table, we reference the name property from the DynamoDB definition table_name = "${aws_dynamodb_table.guestbook.name}".

    -

    These vars are used to populate the template fields in our Dockerrun.aws.json file.

    -
    {
    -    "AWSEBDockerrunVersion": 2,
    -    "containerDefinitions": [
    -        {
    -            "name": "guestbook",
    -            "image": "quintilesims/guestbook-db",
    -            "essential": true,
    -            "memory": 128,
    -            "environment": [
    -                {
    -                    "name": "DYNAMO_TABLE",
    -                    "value": "${table_name}"
    -                }
    -                ...
    - - -

The Layer0 configuration referencing the AWS DynamoDB configuration (table_name = "${aws_dynamodb_table.guestbook.name}") creates an implicit dependency. Before Terraform creates the infrastructure, it uses this information to order resource creation, creating resources in parallel where there are no dependencies. In this example, the AWS DynamoDB table will be created before the Layer0 deploy. See Terraform Resource Dependencies for more information.

    -
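If you ever need to force ordering where no attribute reference exists, Terraform also supports an explicit depends_on meta-argument; a minimal hypothetical sketch based on the resources above (not needed in this example, since the implicit dependency already handles ordering):

# Hypothetical: make the deploy wait for the table even without referencing one of its attributes
resource "layer0_deploy" "guestbook" {
  name       = "guestbook"
  content    = "${data.template_file.guestbook.rendered}"
  depends_on = ["aws_dynamodb_table.guestbook"]
}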

    Part 4: Scaling a Layer0 Service#

    -

    The workflow to make changes to your infrastructure generally involves updating your Terraform configuration file followed by a terraform plan and terraform apply.

    -

    Update the Terraform configuration#

    -

Open the file ./example-1/modules/guestbook_service/main.tf in a text editor and add a scale property with a value of 3 to the layer0_service section. For more information about the scale property, see the Layer0 Terraform Plugin documentation. The result should look like the following:

    -

    example-1/modules/guestbook_service/main.tf

    -
    # Create a service named "guestbook"
    -resource "layer0_service" "guestbook" {
    -  name          = "guestbook"
    -  environment   = "${layer0_environment.demo.id}"
    -  deploy        = "${layer0_deploy.guestbook.id}"
    -  load_balancer = "${layer0_load_balancer.guestbook.id}"
    -  scale         = 3
    -}
    - - -

    Plan and Apply#

    -

Execute the terraform plan command to understand the changes that you will be making. Note that if you do not specify scale, it defaults to 1.

    -

    terraform plan

    -

    Outputs:

    -
    ...
    -
    -~ module.guestbook.layer0_service.guestbook
    -    scale: "1" => "3"
    - - -

    Now run the following command to deploy your changes:

    -

    terraform apply

    -

    Outputs:

    -
    layer0_environment.demo: Refreshing state... (ID: demoenvbb9f6)
    -data.template_file.guestbook: Refreshing state...
    -layer0_deploy.guestbook: Refreshing state... (ID: guestbook.6)
    -layer0_load_balancer.guestbook: Refreshing state... (ID: guestbo43ab0)
    -layer0_service.guestbook: Refreshing state... (ID: guestboebca1)
    -layer0_service.guestbook: Modifying... (ID: guestboebca1)
    -  scale: "1" => "3"
    -layer0_service.guestbook: Modifications complete (ID: guestboebca1)
    -
    -Apply complete! Resources: 0 added, 1 changed, 0 destroyed.
    -
    -The state of your infrastructure has been saved to the path
    -below. This state is required to modify and destroy your
    -infrastructure, so keep it safe. To inspect the complete state
    -use the `terraform show` command.
    -
    -State path: 
    -
    -Outputs:
    -
    -services = <guestbook_service_url>
    - - -

To confirm your service has been updated to the desired scale, you can run the following Layer0 command. Note that the scale for the guestbook service should eventually reach 3/3.

    -

l0 service get guestbook1_guestbook_svc

Outputs:

    -
SERVICE ID    SERVICE NAME              ENVIRONMENT  LOADBALANCER             DEPLOYMENTS                  SCALE
guestbo4fd3b  guestbook1_guestbook_svc  demo         guestbook1_guestbook_lb  guestbook1_guestbook_dpl:3*  1/3 (2)
    - - -

As scale is a parameter we are likely to change in the future, rather than hardcoding it to 3 as we have just done, it would be better to store it in a service_scale variable, as sketched below. The following Best Practices sections show how you can achieve this.

    -
    -
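A minimal sketch of that approach, assuming a service_scale variable is declared in the module's variables.tf (the default value shown is an assumption):

# variables.tf (sketch)
variable "service_scale" {
  default = 3
}

# main.tf (sketch): reference the variable instead of a hardcoded value
resource "layer0_service" "guestbook" {
  name          = "guestbook"
  environment   = "${layer0_environment.demo.id}"
  deploy        = "${layer0_deploy.guestbook.id}"
  load_balancer = "${layer0_load_balancer.guestbook.id}"
  scale         = "${var.service_scale}"
}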

Best Practices with Terraform + Layer0#

    -

The following sections outline some best practices and tips to take into consideration when using Layer0 with Terraform.

    -
    -

    Part 5: Terraform Remote State#

    -

    Terraform stores the state of the deployed infrastructure in a local file named terraform.tfstate by default. To find out more about why Terraform needs to store state, see Purpose of Terraform State.

    -

How state is loaded and used for operations such as terraform apply is determined by a backend. As mentioned, the state is stored locally by default, which is handled by the "local" backend.

    -

    Remote State#

    -

While Terraform stores state locally by default, it can also be configured to store state in a remote backend. This proves useful when you are working as part of a team to provision and manage services deployed by Terraform: every member of the team needs access to the state file to apply new changes, and needs to be able to do so without overwriting each other's changes. See here for more information on the different backend types supported by Terraform.

    -

To configure a remote backend, append the terraform section below to your Terraform file ./example-1/main.tf. Set the bucket property to the name of an existing S3 bucket.

    -
    -

    Tip

    -

    If you have been following along with the guide, ./example-1/main.tf should already have the below section commented out. You can uncomment the terraform section and populate the bucket property with an appropriate value.

    -
    -
    terraform {
    -  backend "s3" {
    -    bucket     = "<my-bucket-name>"
    -    key        = "demo-env/remote-backend/terraform.tfstate"
    -    region     = "us-west-2"
    -  }
    -}
    - - -

    Once you have modified main.tf, you will need to initialize the newly configured backend by running the following command.

    -

    terraform init

    -

    Outputs:

    -
    Initializing the backend...
    -
-Do you want to copy state from "local" to "s3"?
-  ...
-  Do you want to copy the state from "local" to "s3"? Enter "yes" to copy
-  and "no" to start with the existing state in "s3".
    -
    -  Enter a value: 
    - - -

    Go ahead and enter: yes.

    -
Successfully configured the backend "s3"! Terraform will automatically
    -use this backend unless the backend configuration changes.
    -
    -Terraform has been successfully initialized!
    -...
    - - -

    What's happening#

    -

    As you are configuring a backend for the first time, Terraform will give you an option to migrate your state to the new backend. From now on, any further changes to your infrastructure made by Terraform will result in the remote state file being updated. For more information see Terraform backends.

    -

A new team member can use main.tf from their own machine without obtaining a copy of the state file terraform.tfstate, as the configuration will retrieve the state file from the remote backend.

    -

    Locking#

    -

Not all remote backends support locking (locking ensures that only one person can change the state at a time). The S3 backend used earlier in this example supports locking, but it is disabled by default. The S3 backend uses a DynamoDB table to acquire a lock before making a change to the state file. To enable locking, specify the locking_table property with the name of an existing DynamoDB table. That DynamoDB table also needs a primary key named LockID of type String.

    -

    Security#

    -

A Terraform state file is written in plain text. As a result, deploying resources that require sensitive data can leave that sensitive data stored in the state file. To minimize exposure, you can enable server-side encryption of the state file by setting the encrypt property to true.

    -

This ensures that the file is encrypted in S3. By using a remote backend, you also gain the added benefit that the state file is never persisted to local disk; it is only ever held in memory by Terraform.

    -
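Putting the locking and encryption settings together, a minimal sketch of the backend block, using the locking_table argument name given in this guide and a hypothetical DynamoDB table named terraform-locks:

terraform {
  backend "s3" {
    bucket        = "<my-bucket-name>"
    key           = "demo-env/remote-backend/terraform.tfstate"
    region        = "us-west-2"
    encrypt       = true               # server-side encryption of the state file
    locking_table = "terraform-locks"  # existing DynamoDB table with a LockID (String) primary key
  }
}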

    For securing the state file further, you can also enable access logging on the S3 bucket you are using for the remote backend, which can help track down invalid access should it occur.

    -

    Part 6: Terraform Configuration Structure#

    -

    While there are many different approaches to organizing your Terraform code, we suggest using the following file structure:

example1/  # contains overarching Terraform deployment, pulls in any modules that might exist
  ─ main.tf
  ─ variables.tf
  ─ output.tf
  + modules/  # if you can break up deployment into smaller modules, keep the modules in here
      + guestbook_service/  # contains Terraform configuration for a module
        ─ main.tf
        ─ variables.tf
        ─ output.tf
      + service2/  # contains another module
      + service3/  # contains another module

Here we are making use of Terraform Modules. Modules in Terraform are self-contained packages of Terraform configurations that are managed as a group. Modules are used to create reusable components in Terraform, as well as for basic code organization. In this example, we use modules to separate each service and make it consumable as a module.

    -

If you want to add a new service, you can create a new service folder inside ./modules and consume it the same way (a sketch is shown below). If you wanted to, you could even run multiple copies of the same service. See the Terraform documentation for more information about Creating Modules.
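As a minimal sketch (the module path and variable name follow the layout above, but are assumptions rather than files from the guide's repository), consuming such a module from example1/main.tf might look like:

module "guestbook" {
  source         = "./modules/guestbook_service"
  environment_id = "${layer0_environment.demo.id}"
}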

    -

    Also see the below repositories for ideas on different ways you can organize your Terraform configuration files for the needs of your specific project:

    - -

    Part 7: State Environments#

    -

Layer0 recommends that you create a single environment for each tier of your application, such as dev, staging and production. That recommendation still holds when using Terraform with Layer0. Using the Layer0 CLI, you can target a specific environment for most CLI commands, which lets you service each tier relatively easily. In Terraform, there are a few approaches you can take to enable a similar workflow.

    -

    Single Terraform Configuration#

    -

    You can use a single Terraform configuration to create and maintain multiple environments by making use of the Count parameter, inside a Resource. Count enables you to create multiple copies of a given resource.

    -

For example:

variable "environments" {
  type = "list"

  default = [
    "dev",
    "staging",
    "production",
  ]
}

resource "layer0_environment" "demo" {
  count = "${length(var.environments)}"

  name = "${var.environments[count.index]}_demo"
}

Let's have a more in-depth look at how this works. Start by navigating to the ./terraform-beyond-layer0/example-2 folder and running the plan command.

    -

    terraform plan

    -

    Outputs:

+ module.environment.aws_dynamodb_table.guestbook.0
    ...
    name:                      "dev_guestbook"
...
+ module.environment.aws_dynamodb_table.guestbook.1
    ...
    name:                      "staging_guestbook"
...

Note that you will see a copy of each resource for each environment specified in the environments variable in ./example-2/variables.tf. Go ahead and run apply.

    -

    terraform apply

    -

    Outputs:

Apply complete! Resources: 10 added, 0 changed, 0 destroyed.

Outputs:

guestbook_urls =
<dev_url>
<staging_url>

You have now created two separate environments from a single Terraform configuration: dev and staging. You can navigate to both of the output URLs and note that they are separate instances of the guestbook application, each backed by its own data store.

    -

A common use case for maintaining different environments is to configure each environment slightly differently. For example, you might want to scale your Layer0 service to 3 for staging and leave it at 1 for the dev environment. This can be done by using conditional logic to set the scale parameter in the layer0_service configuration in ./example-2/main.tf. Go ahead and open main.tf in a text editor and navigate to the layer0_service "guestbook" section. Uncomment the scale parameter so that your configuration looks like the following.

    -
    resource "layer0_service" "guestbook" {
    -  count = "${length(var.environments)}"
    -
    -  name          = "${element(layer0_environment.demo.*.name, count.index)}_guestbook_svc"
    -  environment   = "${element(layer0_environment.demo.*.id, count.index)}"
    -  deploy        = "${element(layer0_deploy.guestbook.*.id, count.index)}"
    -  load_balancer = "${element(layer0_load_balancer.guestbook.*.id, count.index)}"
    -  scale         = scale         = "${lookup(var.service_scale, var.environments[count.index]), "1")}"
    -}
    - - -

    The variable service_scale is already defined in variables.tf. If you now go ahead and run plan, you will see that the guestbook service for only the staging environment will be scaled up.
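For reference, a sketch of what that map variable might look like (the exact values live in ./example-2/variables.tf and may differ in the guide's repository):

variable "service_scale" {
  type = "map"

  default = {
    dev     = "1"
    staging = "3"
  }
}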

    -

    terraform plan

    -

    Outputs:

~ layer0_service.guestbook.1
    scale: "1" => "3"

A potential downside of this approach, however, is that all of your environments use the same state file. Sharing a state file breaks some of the resource encapsulation between environments: should your state file ever become corrupt, it would affect your ability to service all of the environments until you resolve the issue, potentially by rolling back to a previous copy of the state file.

    -

    The next section will show you how you can separate your Terraform environment configuration such that each environment will have its own state file.

    -
    -

    Note

    -

As previously mentioned, you will want to avoid hardcoding resource parameter values as much as possible; the scale property of a Layer0 service is one example. This extends to other properties as well, such as the Docker image version: avoid using latest and specify an explicit version via a configurable variable when possible.
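As a minimal sketch (not taken from the guide's files, and the tag shown is purely illustrative), pinning the image through a variable might look like:

variable "guestbook_image" {
  default = "quintilesims/guestbook:v1.0.0"
}

The deploy's task definition template would then reference "${var.guestbook_image}" instead of a hardcoded latest tag.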

    -
    -

    Multiple Terraform Configurations#

    -

The previous example used a single set of Terraform configuration files to create and maintain multiple environments. This resulted in a single state file holding the state information for all of the environments. To avoid all environments sharing one state file, you can split your Terraform configuration so that you have a state file for each environment.

    -

Go ahead and navigate to the ./terraform-beyond-layer0/example-3 folder. Here we are using a folder to separate each environment, so env-dev and env-staging represent a dev and a staging environment. To work with either environment, navigate into that environment's folder and run your Terraform commands there. This ensures that each environment has its own state file.

    -

Open the env-dev folder inside a text editor. Note that main.tf doesn't contain any resource definitions. Instead, there is a single module definition with various variables being passed in, which is also how the environment name is passed in (a sketch is shown below). To create dev and staging environments for our guestbook application, go ahead and run the terraform plan and apply commands from the env-dev and env-staging folders.
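As a rough sketch of what ./example-3/env-dev/main.tf might contain (the module path and variable names here are assumptions, so check the guide's repository for the real file):

module "guestbook" {
  source      = "../modules/guestbook"
  environment = "dev"
}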

    -
# assuming you are in the terraform-beyond-layer0/example-3 folder
cd env-dev
terraform get
terraform plan
terraform apply

cd ../env-staging
terraform get
terraform plan
terraform apply

You should now have two instances of the guestbook application running. Note that the guestbook service in the staging environment has been scaled to 3. We have done this by specifying a map variable service_scale in ./example-3/env-staging/variables.tf, which can hold a different scale value for each environment.

    -

    Part 8: Multiple Provider Instances#

    -

You can define multiple instances of the same provider, each uniquely configured. For example, you can configure multiple aws providers to support multiple regions or different roles, or, in the case of the layer0 provider, to support multiple Layer0 endpoints.

    -

    For example:

    -
# aws provider
provider "aws" {
  alias  = "east"
  region = "us-east-1"
  # ...
}

# aws provider configured to a west region
provider "aws" {
  alias  = "west"
  region = "us-west-1"
  # ...
}

This now allows you to reference aws providers configured for different regions. You do so by referencing the provider using the naming scheme TYPE.ALIAS, which in the above example yields aws.east and aws.west. A resource then selects which provider instance to use via its provider argument, as shown below. See Provider Configuration for more information.

    -
    resource "aws.east_instance" "foo" {
    -  # ...
    -}
    -
    -resource "aws.west_instance" "bar" {
    -  # ...
    -}
    - - -

    Part 9: Cleanup#

    -

When you're finished with the examples in this guide, run the destroy command in each of the following directories to destroy the Layer0 environment, the application, and the DynamoDB table.

    -

    Directories:

• /example-1
• /example-2
• /example-3/env-dev
• /example-3/env-staging

terraform destroy
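If you prefer, a small shell loop can run the destroy in each directory for you (paths assume you are at the root of the terraform-beyond-layer0 examples):

for dir in example-1 example-2 example-3/env-dev example-3/env-staging; do
  (cd "$dir" && terraform destroy)
done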

    -
    -

    Remote Backend Resources

    -

If you created additional resources (an S3 bucket and a DynamoDB table) separately when configuring a remote backend, do not forget to delete them if they are no longer needed. You should be able to look at your Terraform configuration file (e.g., ./example-1/main.tf) to determine the name of the bucket and table.

diff --git a/docs/guides/walkthrough/deployment-1/index.html b/docs/guides/walkthrough/deployment-1/index.html
    Deployment 1: A Simple Guestbook App#

    In this section you'll learn how different Layer0 commands work together to deploy applications to the cloud. The example application in this section is a guestbook -- a web application that acts as a simple message board. You can choose to complete this section using either the Layer0 CLI or Terraform.

    @@ -484,32 +919,36 @@

    Part 1: Create the Environment

    l0 loadbalancer create --port 80:80/http demo-env guestbook-lb

    We should see output like the following:

LOADBALANCER ID  LOADBALANCER NAME  ENVIRONMENT  SERVICE  PORTS       PUBLIC  URL
guestbodb65a     guestbook-lb       demo-env              80:80/HTTP  true

    The following is a summary of the arguments passed in the above command:

    @@ -557,8 +997,9 @@

Part 3: Deploy the ECS Task Definition#

At the command prompt, execute the following:

    l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl

    We should see output like the following:

DEPLOY ID        DEPLOY NAME    VERSION
guestbook-dpl.1  guestbook-dpl  1

    The following is a summary of the arguments passed in the above command:

    @@ -588,8 +1029,9 @@

    Part 4: Create the Service

    l0 service get demo-env:guestbook-svc

    If we're quick enough, we'll be able to see the first stage of the process (this is what was output after running the service create command up in Part 4). We should see an asterisk (*) next to the name of the guestbook-dpl:1 deploy, which indicates that the service is in a transitional state:

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS       SCALE
guestbo9364b  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:1*  0/1

    In the next phase of deployment, if we execute the service get command again, we will see (1) in the Scale column; this indicates that 1 copy of the service is transitioning to an active state:

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS       SCALE
guestbo9364b  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:1*  0/1 (1)

    In the final phase of deployment, we will see 1/1 in the Scale column; this indicates that the service is running 1 copy:

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS       SCALE
guestbo9364b  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:1   1/1

    @@ -633,8 +1078,9 @@

Get the Application's URL#

At the command prompt, execute the following:

    l0 loadbalancer get demo-env:guestbook-lb

    We should see output like the following:

LOADBALANCER ID  LOADBALANCER NAME  ENVIRONMENT  SERVICE        PORTS       PUBLIC  URL
guestbodb65a     guestbook-lb       demo-env     guestbook-svc  80:80/HTTP  true    <url>

Copy the value shown in the URL column and paste it into a web browser.

@@ -642,7 +1088,8 @@

    Get the Application's URL

    Logs#

    Output from a Service's docker containers may be acquired by running the following command:

l0 service logs <SERVICE>

    @@ -697,42 +1144,45 @@

    *.tf: A Brief Aside

    l0 environment create demo-env

    This command is recreated in main.tf like so:

# walkthrough/deployment-1/main.tf

resource "layer0_environment" "demo-env" {
    name = "demo-env"
}

    We've bundled up the heart of the Guestbook deployment (load balancer, deploy, service, etc.) into a Terraform module. To use it, we declare a module block and pass in the source of the module as well as any configuration or variables that the module needs.

# walkthrough/deployment-1/main.tf

module "guestbook" {
    source         = "github.com/quintilesims/guides//guestbook/module"
    environment_id = "${layer0_environment.demo.id}"
}

    You can see that we pass in the ID of the environment we create. All variables declared in this block are passed to the module, so the next file we should look at is variables.tf inside of the module to get an idea of what the module is expecting.

    There are a lot of variables here, but only one of them doesn't have a default value.

# guestbook/module/variables.tf

variable "environment_id" {
    description = "id of the layer0 environment in which to create resources"
}

    You'll notice that this is the variable that we're passing in. For this particular deployment of the Guestbook, all of the default options are fine. We could override any of them if we wanted to, just by specifying a new value for them back in deployment-1/main.tf.
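As a hedged sketch of such an override (load_balancer_name is one of the module's variables, as seen below in module/main.tf; the value shown here is purely illustrative), deployment-1/main.tf could pass an extra argument:

module "guestbook" {
    source             = "github.com/quintilesims/guides//guestbook/module"
    environment_id     = "${layer0_environment.demo.id}"
    load_balancer_name = "my-custom-guestbook-lb"
}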

    Now that we've seen the variables that the module will have, let's take a look at part of module/main.tf and see how some of them might be used:

# guestbook/module/main.tf

resource "layer0_load_balancer" "guestbook-lb" {
    name        = "${var.load_balancer_name}"
    environment = "${var.environment_id}"
    port {
        host_port      = 80
        container_port = 80
    }
    ...
}

    You can follow this link to learn more about Layer0 resources in Terraform.

    @@ -750,20 +1201,58 @@

Part 1: Terraform Get#

Get: git::https://github.com/quintilesims/guides.git

    We should now have a new local directory called .terraform/. We don't need to do anything with it; we just want to make sure it's there.


    -

Part 2: Terraform Init#

This deployment has provider dependencies, so an init call must be made (Terraform v0.11 and later requires init). At the command prompt, execute the following command:

    +

    terraform init

    +

    We should see output like the following:

    +
    Initializing modules...
    +- module.guestbook
    +
    +Initializing provider plugins...
    +- Checking for available provider plugins on https://releases.hashicorp.com...
    +- Downloading plugin for provider "template" (1.0.0)...
    +
    +The following providers do not have any version constraints in configuration,
    +so the latest version was installed.
    +
    +To prevent automatic upgrades to new major versions that may contain breaking
    +changes, it is recommended to add version = "..." constraints to the
    +corresponding provider blocks in configuration, with the constraint strings
    +suggested below.
    +
    +* provider.template: version = "~> 1.0"
    +
    +Terraform has been successfully initialized!
    +
    +You may now begin working with Terraform. Try running "terraform plan" to see
    +any changes that are required for your infrastructure. All Terraform commands
    +should now work.
    +
    +If you ever set or change modules or backend configuration for Terraform,
    +rerun this command to reinitialize your working directory. If you forget, other
    +commands will detect it and remind you to do so if necessary.
    +
    + + +
    +

    Part 3: Terraform Plan#

    Before we actually create/update/delete any resources, it's a good idea to find out what Terraform intends to do.

    Run terraform plan. Terraform will prompt you for configuration values that it does not have:

var.endpoint
    Enter a value:

var.token
    Enter a value:

    You can find these values by running l0-setup endpoint <your layer0 prefix>.

    @@ -772,7 +1261,7 @@

There are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the terraform.tfvars file, or exporting environment variables like TF_VAR_endpoint and TF_VAR_token, for example). See the Terraform Docs for more.
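For example, the environment-variable route might look like the following (the values are placeholders; obtain the real ones from l0-setup endpoint <your layer0 prefix>):

export TF_VAR_endpoint="<layer0 api endpoint>"
export TF_VAR_token="<layer0 api token>"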

    The plan command should give us output like the following:

    -
    Refreshing Terraform state in-memory prior to plan...
    +

    This shows you that Terraform intends to create a deploy, an environment, a load balancer, and a service, all through Layer0.

    If you've gone through this deployment using the Layer0 CLI, you may notice that these resources appear out of order - that's fine. Terraform presents these resources in alphabetical order, but underneath, it knows the correct order in which to create them.

    Once we're satisfied that Terraform will do what we want it to do, we can move on to actually making these things exist!


    -

Part 4: Terraform Apply#

    Run terraform apply to begin the process.

    We should see output like the following:

    -
layer0_environment.demo: Refreshing state...
...
...
...

guestbook_url = <http endpoint for the sample application>
diff --git a/docs/guides/walkthrough/deployment-2/index.html b/docs/guides/walkthrough/deployment-2/index.html

    Deployment 2: Guestbook + Redis#

In this section, we're going to add some complexity to the previous deployment. Deployment 1 saw us create a simple guestbook application which kept its data in memory. But what if that application ever came down, whether by intention or by accident?

@@ -496,8 +931,9 @@

Part 1: Create the Redis Load Balancer#

At the command prompt, execute the following:

    l0 loadbalancer create --port 6379:6379/tcp --private --healthcheck-target tcp:6379 demo-env redis-lb

    We should see output like the following:

LOADBALANCER ID  LOADBALANCER NAME  ENVIRONMENT  SERVICE  PORTS          PUBLIC  URL
redislb16ae6     redis-lb           demo-env              6379:6379/TCP  false

    The following is a summary of the arguments passed in the above command:

    @@ -515,8 +951,9 @@

Part 2: Deploy the ECS Task Definition#

At the command prompt, execute the following:

    l0 deploy create Redis.Dockerrun.aws.json redis-dpl

    We should see output like the following:

DEPLOY ID    DEPLOY NAME  VERSION
redis-dpl.1  redis-dpl    1

    The following is a summary of the arguments passed in the above command:

    @@ -531,8 +968,9 @@

    Part 3: Create the Redis Service

    l0 service create --wait --loadbalancer demo-env:redis-lb demo-env redis-svc redis-dpl:latest

    We should see output like the following:

SERVICE ID    SERVICE NAME  ENVIRONMENT  LOADBALANCER  DEPLOYMENTS  SCALE
redislb16ae6  redis-svc     demo-env     redis-lb      redis-dpl:1  0/1

    The following is a summary of the arguments passed in the above commands:

    @@ -557,8 +995,9 @@

Part 4: Check the Status of the Redis Service#

    Once the service has finished scaling, try looking at the service's logs to see the output that the Redis server creates:

    l0 service logs redis-svc

    Among some warnings and information not important to this exercise and a fun bit of ASCII art, you should see something like the following:

... # words and ASCII art
1:M 05 Apr 23:29:47.333 * The server is now ready to accept connections on port 6379

    Now we just need to teach the Guestbook application how to talk with our Redis service.

    @@ -566,7 +1005,7 @@


    Part 5: Update the Guestbook Deploy#

    You should see in walkthrough/deployment-2/ another Guestbook.Dockerrun.aws.json file. This file is very similar to but not the same as the one in deployment-1/ - if you open it up, you can see the following additions:

    ...
    "environment": [
        {
            "name": "GUESTBOOK_BACKEND_TYPE",
            ...
        },
        {
            "name": "GUESTBOOK_BACKEND_CONFIG",
            "value": "<redis host and port here>"
        }
    ],
    ...

    The "GUESTBOOK_BACKEND_CONFIG" variable is what will point the Guestbook application towards the Redis server. The <redis host and port here> section needs to be replaced and populated in the following format:

"value": "ADDRESS_OF_REDIS_SERVER:PORT_THE_SERVER_IS_SERVING_ON"

    We already know that Redis is serving on port 6379, so let's go find the server's address. Remember, it lives behind a load balancer that we made, so run the following command:

    l0 loadbalancer get redis-lb

    We should see output like the following:

LOADBALANCER ID  LOADBALANCER NAME  ENVIRONMENT  SERVICE    PORTS          PUBLIC  URL
redislb16ae6     redis-lb           demo-env     redis-svc  6379:6379/TCP  false   internal-l0-<yadda-yadda>.elb.amazonaws.com

    Copy that URL value, replace <redis host and port here> with the URL value in Guestbook.Dockerrun.aws.json, append :6379 to it, and save the file. It should look something like the following:

    ...
    "environment": [
        {
            "name": "GUESTBOOK_BACKEND_CONFIG",
            "value": "internal-l0-<yadda-yadda>.elb.amazonaws.com:6379"
        }
    ],
    ...

    Now, we can create an updated deploy:

    l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl

    We should see output like the following:

DEPLOY ID        DEPLOY NAME    VERSION
guestbook-dpl.2  guestbook-dpl  2

    @@ -618,26 +1062,30 @@

Part 6: Update the Guestbook Service#

Now we just need to apply the new Guestbook deploy to the running Guestbook service:

    l0 service update guestbook-svc guestbook-dpl:latest

    As the Guestbook service moves through the phases of its update process, we should see outputs like the following (if we keep an eye on the service with l0 service get guestbook-svc, that is):

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS       SCALE
guestbo5fadd  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:2*  1/1
                                                        guestbook-dpl:1

above: guestbook-dpl:2 is in a transitional state

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS      SCALE
guestbo5fadd  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:2  2/1
                                                        guestbook-dpl:1

above: both versions of the deployment are running at scale

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS       SCALE
guestbo5fadd  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:2   1/1
                                                        guestbook-dpl:1*

above: guestbook-dpl:1 is in a transitional state

SERVICE ID    SERVICE NAME   ENVIRONMENT  LOADBALANCER  DEPLOYMENTS      SCALE
guestbo5fadd  guestbook-svc  demo-env     guestbook-lb  guestbook-dpl:2  1/1

above: guestbook-dpl:1 has been removed, and only guestbook-dpl:2 remains

    @@ -705,12 +1153,51 @@

Part 1: Terraform Get

Part 2: Terraform Init#

This deployment has provider dependencies, so an init call must be made (Terraform v0.11 and later requires init). At the command prompt, execute the following command:

    +

    terraform init

    +

    We should see output like the following:

    +
    Initializing modules...
    +- module.redis
    +  Getting source "github.com/quintilesims/redis//terraform"
    +- module.guestbook
    +  Getting source "github.com/quintilesims/guides//guestbook/module"
    +
    +Initializing provider plugins...
    +- Checking for available provider plugins on https://releases.hashicorp.com...
    +- Downloading plugin for provider "template" (1.0.0)...
    +
    +The following providers do not have any version constraints in configuration,
    +so the latest version was installed.
    +
    +To prevent automatic upgrades to new major versions that may contain breaking
    +changes, it is recommended to add version = "..." constraints to the
    +corresponding provider blocks in configuration, with the constraint strings
    +suggested below.
    +
    +* provider.template: version = "~> 1.0"
    +
    +Terraform has been successfully initialized!
    +
    +You may now begin working with Terraform. Try running "terraform plan" to see
    +any changes that are required for your infrastructure. All Terraform commands
    +should now work.
    +
    +If you ever set or change modules or backend configuration for Terraform,
    +rerun this command to reinitialize your working directory. If you forget, other
    +commands will detect it and remind you to do so if necessary.
    +
    + + +
    +

    Part 3: Terraform Plan#

    It's always a good idea to find out what Terraform intends to do, so let's do that:

    terraform plan

    As before, we'll be prompted for any variables Terraform needs and doesn't have (see the note in Deployment 1 for configuring Terraform variables). We'll see output similar to the following:

    -
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.

+ layer0_environment.demo
    cluster_count:     "<computed>"
    links:             "<computed>"
    name:              "demo"
    os:                "linux"
    security_group_id: "<computed>"
    size:              "m3.medium"

+ module.redis.layer0_deploy.redis
    ...

+ module.redis.layer0_load_balancer.redis
    environment:    "${var.environment_id}"
    health_check.#: "<computed>"
    name:           "redis"
    port.#:         "1"
    port.1072619732.certificate: ""
    ...

+ module.redis.layer0_service.redis
    deploy:        "${ var.deploy_id == \"\" ? layer0_deploy.redis.id : var.deploy_id }"
    environment:   "${var.environment_id}"
    load_balancer: "${layer0_load_balancer.redis.id}"
    name:          "redis"
    scale:         "1"
    wait:          "true"

<= module.guestbook.data.template_file.guestbook
    rendered: "<computed>"
    template: "{ ...rendered Guestbook Dockerrun template... }"
    vars.%:   "<computed>"

+ module.guestbook.layer0_deploy.guestbook
    content: "${data.template_file.guestbook.rendered}"
    name:    "guestbook"

+ module.guestbook.layer0_load_balancer.guestbook
    environment:    "${var.environment_id}"
    health_check.#: "<computed>"
    name:           "guestbook"
    port.#:         "1"
    port.2027667003.certificate:    ""
    port.2027667003.container_port: "80"
    port.2027667003.host_port:      "80"
    port.2027667003.protocol:       "http"
    url:            "<computed>"

+ module.guestbook.layer0_service.guestbook
    deploy:        "${ var.deploy_id == \"\" ? layer0_deploy.guestbook.id : var.deploy_id }"
    environment:   "${var.environment_id}"
    load_balancer: "${layer0_load_balancer.guestbook.id}"
    name:          "guestbook"
    scale:         "2"
    wait:          "true"

Plan: 7 to add, 0 to change, 0 to destroy.

    We should see that Terraform intends to add 7 new resources, some of which are for the Guestbook deployment and some of which are for the Redis deployment.


    -

Part 4: Terraform Apply#

    Run terraform apply, and we should see output similar to the following:

    -
data.template_file.redis: Refreshing state...
     layer0_deploy.redis-dpl: Creating...
     
     ...
    @@ -813,7 +1301,8 @@ 

guestbook_url = <http endpoint for the sample application>
diff --git a/docs/guides/walkthrough/deployment-3/index.html b/docs/guides/walkthrough/deployment-3/index.html
deleted file mode 100644

    Deployment 3: Guestbook + Redis + Consul#

    -

In Deployment 2, we created two services in the same environment and linked them together manually. While that can work for a small system, it's not really feasible for a system with a lot of moving parts: we would need to look up load balancer endpoints for all of our services and manually link them all together. To that end, here we're going to redeploy our two-service system using Consul, a service discovery tool.

    -

    For this deployment, we'll create a cluster of Consul servers which will be responsible for keeping track of the state of our system. -We'll also deploy new versions of the Guestbook and Redis task definition files - in addition to creating a container for its respective application, each task definition creates two other containers:

    -
      -
    • a container for a Consul agent, which is in charge of communicating with the Consul server cluster
    • -
    • a container for Registrator, which is charge of talking to the local Consul agent when a service comes up or goes down.
    • -
    -

    You can choose to complete this section using either the Layer0 CLI or Terraform.

    -

    Deploy with Layer0 CLI#

    -

    If you're following along, you'll want to be working in the walkthrough/deployment-3/ directory of your clone of the guides repo.

    -

    Files used in this deployment:

    - - - - - - - - - - - - - - - - - - - - - -
    FilenamePurpose
    CLI.Consul.Dockerrun.aws.jsonTemplate for running a Consul server
    CLI.Guestbook.Dockerrun.aws.jsonTemplate for running the Guestbook application with Registrator and Consul agent
    CLI.Redis.Dockerrun.aws.jsonTemplate for running a Redis server with Registrator and Consul agent
    -
    -

    Part 1: Create the Consul Load Balancer#

    -

    The Consul server cluster will live in the same environment as our Guestbook and Redis services - if you've completed Deployment 1 and Deployment 2, this environment already exists as demo-env. -We'll start by creating a load balancer for the Consul cluster. -The load balancer will be private since only Layer0 services need to communicate with the Consul cluster. -At the command prompt, execute the following:

    -

    l0 loadbalancer create --port 8500:8500/tcp --port 8301:8301/tcp --private --healthcheck-target tcp:8500 demo-env consul-lb

    -

    We should see output like the following:

    -
    LOADBALANCER ID  LOADBALANCER NAME  ENVIRONMENT  SERVICE  PORTS          PUBLIC  URL
    -consull66b23     consul-lb          consul-env            8500:8500/TCP  false
    -                                                          8301:8301/TCP
    - - -

    The following is a summary of the arguments passed in the above command:

    -
      -
    • loadbalancer create: creates a new load balancer
    • -
    • --port 8500:8500/tcp: instructs the load balancer to forward requests from port 8500 on the load balancer to port 8500 in the EC2 instance using the TCP protocol
    • -
    • --port 8301:8301/tcp: instructs the load balancer to forward requests from port 8301 on the load balancer to port 8301 in the EC2 instance using the TCP protocol
    • -
    • --private: instructs the load balancer to ignore outside traffic
    • -
    • --healthcheck-target: instructs the load balancer to use a TCP ping on port 8500 as the basis for deciding whether the service is healthy
    • -
    • demo-env: the name of the environment in which the load balancer is being created
    • -
    • consul-lb: a name for the load balancer itself
    • -
    -

    While we're touching on the Consul load balancer, we should grab its URL - this is the one value that we'll need to know in order to deploy the rest of our system, no matter how large it may get. -At the command prompt, execute the following:

    -

    l0 loadbalancer get consul-lb

    -

    We should see output that looks like the output we just received above after creating the load balancer, but this time there is something in the URL column. -That URL is the value we're looking for. -Make note of it for when we reference it later.

    -
    -

    Part 2: Deploy the Consul Task Definition#

    -

    Before we can create the deploy, we need to supply the URL of the Consul load balancer that we got in Part 1. -In CLI.Consul.Dockerrun.aws.json, find the entry in the environment block that looks like this:

    -
    {
    -    "name": "CONSUL_SERVER_URL",
    -    "value": ""
    -}
    - - -

    Update the "value" with the Consul load balancer's URL into and save the file. -We can then create the deploy. -At the command prompt, execute the following:

    -

    l0 deploy create CLI.Consul.Dockerrun.aws.json consul-dpl

    -

    We should see output like the following:

    -
    DEPLOY ID     DEPLOY NAME  VERSION
    -consul-dpl.1  consul-dpl   1
    - - -

    The following is a summary of the arguments passed in the above command:

    -
      -
• deploy create: creates a new Layer0 Deploy and allows you to specify an ECS task definition
    • -
    • CLI.Consul.Dockerrun.aws.json: the file name of the ECS task definition (use the full path of the file if it is not in the current working directory)
    • -
    • consul-dpl: a name for the deploy, which will later be used in creating the service
    • -
    -
    -

    Part 3: Create the Consul Service#

    -

    Here, we pull the previous resources together to create a service. -At the command prompt, execute the following:

    -

    l0 service create --wait --loadbalancer demo-env:consul-lb demo-env consul-svc consul-dpl:latest

    -

    We should see output like the following:

    -
    Waiting for Deployment...
    -SERVICE ID    SERVICE NAME  ENVIRONMENT  LOADBALANCER  DEPLOYMENTS   SCALE
    -consuls2f3c6  consul-svc    demo-env     consul-lb     consul-dpl:1  1/1
    - - -

    The following is a summary of the arguments passed in the above commands:

    -
      -
    • service create: creates a new Layer0 Service
    • -
    • --wait: instructs the CLI to keep hold of the shell until the service has been successfully deployed
    • -
    • --loadbalancer demo-env:consul-lb: the fully-qualified name of the load balancer behind which the service should live; in this case, the load balancer named consul-lb in the environment named demo-env
    • -
    • demo-env: the name of the environment in which the service is to reside
    • -
    • consul-svc: a name for the service itself
    • -
    • consul-dpl:latest: the name and version of the deploy that the service should put into action
    • -
    -

    Once the service has finished being deployed (and --wait has returned our shell to us), we need to scale the service.

    -

Currently, we only have one Consul server running in the cluster. For best use, we should have at least 3 servers running (see this link for more details on Consul servers and their consensus protocol). Indeed, if we inspect the command block of the task definition file, we can find the following parameter: bootstrap-expect=3. This tells the Consul server that we have just deployed that it should expect a total of three servers. We still need to fulfill that expectation, so we'll scale our service up to three. At the command prompt, execute the following:

    -

    l0 service scale --wait consul-svc 3

    -

    We should see output like the following:

    -
    Waiting for Deployment...
    -SERVICE ID    SERVICE NAME  ENVIRONMENT  LOADBALANCER  DEPLOYMENTS   SCALE
    -consuls2f3c6  consul-svc    demo-env     consul-lb     consul-dpl:1  3/3
    - - -
    -

    Important!

    -

    The successful completion of the scale command doesn't mean that we're ready to move on just yet! -We need to check in on the logs (l0 service logs consul-svc) until we can confirm that all three of the Consul servers have synced up with each other. -Each consul-server section in the logs should be ending with consul: Adding LAN server [ip address] or agent: Join completed. -If you see one of the sections ending with agent: Join failed, retrying in 30s, you need to wait for that server to join the cluster before continuing.

    -
    -
    -

    Part 4: Update and Redeploy the Redis and Guestbook Applications#

    -

    We're going to need the URL of the Consul load balancer again. -In each of the CLI.Redis and CLI.Guestbook task definition files, look for the CONSUL_SERVER_URL block in the consul-agent container and populate the value field with the Consul load balancer's URL, then save the file. -At the command prompt, execute the two following commands to create new versions of the deploys for the Redis and Guestbook applications:

    -

    l0 deploy create CLI.Redis.Dockerrun.aws.json redis-dpl

    -

    l0 deploy create CLI.Guestbook.Dockerrun.aws.json guestbook-dpl

    -

    Then, execute the two following commands to redeploy the existing Redis and Guestbook services using those new deploys:

    -

    l0 service update --wait redis-svc redis-dpl:latest

    -

    l0 service update --wait guestbook-svc guestbook-dpl:latest

    -
    -

    Note

    -

    Here, we should run l0 service logs consul-svc again and confirm that the Consul cluster has discovered these two services.

    -
    -

    We can use l0 loadbalancer get guestbook-lb to obtain the guestbook application's URL, and then navigate to it with a web browser. -Our guestbook app should be up and running - this time, it's been deployed without needing to know the address of the Redis backend!

    -

    Of course, this is a simple example; in both this deployment and Deployment 2, we needed to use l0 loadbalancer get to obtain the URL of a load balancer. -However, in a system with many services that uses Consul like this example, we only ever need to find the URL of the Consul cluster - not the URLs of every service that needs to talk to another of our services.

    -
    -

    Part 5: Inspect the Consul Universe (Optional)#

    -

    Let's take a glimpse into how this system that we've deployed works. -This requires that we have access to the key pair we've told Layer0 about when we set it up.

    -

    Open Ports for SSH#

    -

    We want to SSH into the Guestbook EC2 instance, which means that we need to tell the Guestbook load balancer to allow SSH traffic through. -At the command prompt, execute the following:

    -

    l0 loadbalancer addport guestbook-lb 22:22/tcp

    -

    We should see output like the following:

    -
    LOADBALANCER ID  LOADBALANCER NAME  ENVIRONMENT  SERVICE        PORTS       PUBLIC  URL
    -guestbodb65a     guestbook-lb       demo-env     guestbook-svc  80:80/HTTP  true    <url>
    -                                                                22:22/TCP
    - - -

    We need to take note of the load balancer's URL here, too.

    -

    SSH Into the Instance#

    -

    At the command prompt, execute the following:

    -

    ssh -i /path/to/keypair ec2-user@<guestbook_load_balancer_url> -o ServerAliveInterval=30

    -

    (We'll probably be asked if we want to continue connecting - we do, so we'll enter yes.)

    -

    Summary of arguments passed into the above command:

    -
      -
    • -i /path/to/keypair: this allows us to specify an identity file for use when connecting to the remote machine - in this case, we want to replace /path/to/keypair with the actual path to the keypair we created when we set up Layer0
    • -
    • ec2-user@<guestbook_load_balancer_url>: the address (here we want to replace <guestbook_load_balancer_url> with the actual URL of the guestbook load balancer) of the machine to which we want to connect and the name of the user (ec2-user) that we'd like to connect as
    • -
    • -o: allows us to set parameters on the ssh command
    • -
    • ServerAliveInterval=30: one of those ssh parameters - AWS imposes an automatic disconnect if a connection is not active for a certain amount of time, so we use this option to ping every 30 seconds to prevent that automatic disconnect
    • -
    -

    Look Around You#

    -

    We're now inside of the EC2 instance! -If we run docker ps, we should see that our three Docker containers (the Guestbook app, a Consul agent, and Registrator) are up and running, as well as an amazon-ecs-agent image. -But that's not the Consul universe that we came here to see. -At the EC2 instance's command prompt, execute the following:

    -

    echo $(curl -s localhost:8500/v1/catalog/services) | jq '.'

    -

    We should see output like the following:

    -
    {
    -  "consul": [],
    -  "consul-8301": [
    -    "udp"
    -  ],
    -  "consul-8500": [],
    -  "consul-8600": [
    -    "udp"
    -  ],
    -  "guestbook-redis": [],
    -  "redis": []
    -}
    - - -

    Summary of commands passed in the above command:

    -
      -
    • curl -s localhost:8500/v1/catalog/services: use curl to send a GET request to the specified URL, where localhost:8500 is an HTTP connection to the local Consul agent in this EC2 instance (the -s flag just silences excess output from curl)
    • -
    • | jq '.': use a pipe (|) to take whatever returns from the left side of the pipe and pass it to the jq program, which we use here simply to pretty-print the JSON response
    • -
    • echo $(...): print out whatever returns from running the stuff inside of the parens; not necessary, but it gives us a nice newline after we get our response
    • -
    -

    In that output, we can see all of the things that our local Consul agent knows about. -In addition to a few connections to the Consul server cluster, we can see that it knows about the Guestbook application running in this EC2 instance, as well as the Redis application running in a different instance with its own Consul agent and Registrator.

    -

    Let's take a closer look at the Redis service and see how our Guestbook application is locating our Redis application. -At the EC2 instance's command prompt, execute the following:

    -

    echo $(curl -s http://localhost:8500/v1/catalog/service/redis) | jq '.'

    -

    We should see output like the following:

    -
    [
    -  {
    -    "ID": "b4bb81e6-fe6a-c630-2553-7f6492ae5275",
    -    "Node": "ip-10-100-230-97.us-west-2.compute.internal",
    -    "Address": "10.100.230.97",
    -    "Datacenter": "dc1",
    -    "TaggedAddresses": {
    -      "lan": "10.100.230.97",
    -      "wan": "10.100.230.97"
    -    },
    -    "NodeMeta": {},
    -    "ServiceID": "562aceee6935:ecs-l0-tlakedev-redis-dpl-20-redis-e0f989e5af97cdfd0e00:6379",
    -    "ServiceName": "redis",
    -    "ServiceTags": [],
    -    "ServiceAddress": "10.100.230.97",
    -    "ServicePort": 6379,
    -    "ServiceEnableTagOverride": false,
    -    "CreateIndex": 761,
    -    "ModifyIndex": 761
    -  }
    -]
    - - -

    To really see how the Guestbook application connects to Redis, we can take an even closer look!

    -

    Run docker ps to generate a listing of all the containers that Docker is running on the EC2 instance, and note the Container ID for the Guestbook container. Then run the following command to connect to the Guestbook container:

    -

    docker exec -it [container_id] /bin/sh

    -

    Once we've gotten inside the container, we'll run a similar command to the previous curl:

    -

    curl -s consul-agent:8500/v1/catalog/service/redis

    -

    Our Guestbook application makes a call like this one and figures out how to connect to the Redis service by mushing together the information from the ServiceAddress and ServicePort fields!
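For instance, a hedged one-liner that pulls that address:port pair out of the same response would look like the following (this assumes jq is available inside the container, which it may not be; the earlier jq commands ran on the EC2 host):

curl -s consul-agent:8500/v1/catalog/service/redis | jq -r '.[0] | "\(.ServiceAddress):\(.ServicePort)"'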

    -

    To close the ssh connection to the EC2 instance, run exit in the command prompt.

    -
    -

    Cleanup#

    -

    When you're finished with the example, we can instruct Layer0 to terminate the applications and delete the environment.

    -

    l0 environment delete demo-env

    -
    -

    Deploy with Terraform#

    -

    As before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI. -As before, we will assume that you've cloned the guides repo and are working in the iterative-walkthrough/deployment-3/ directory.

    -

    We'll use these files to manage our deployment with Terraform:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FilenamePurpose
    Guestbook.Dockerrun.aws.jsonTemplate for running the Guestbook application
    main.tfProvisions resources; populates variables in template files
    outputs.tfValues that Terraform will yield during deployment
    Redis.Dockerrun.aws.jsonTemplate for running the Redis application
    terraform.tfstateTracks status of deployment (created and managed by Terraform)
    terraform.tfvarsVariables specific to the environment and application(s)
    variables.tfValues that Terraform will use during deployment
    -
    -

    *.tf: A Brief Aside: Revisited: Redux#

    -

    In looking at main.tf, you can see that we're pulling in a Consul module that we maintain (here's the repo); this removes the need for a local task definition file.

    -

    We also are continuing to use modules for Redis and Guestbook. -However, instead of just sourcing the module and passing in a value or two, you can see that we actually create new deploys from local task definition files and pass those deploys in to the module. -This design allows us to use pre-made modules while also offering a great deal of flexibility. -If you'd like to follow along the Redis deployment logic chain (the other applications/services work similarly), it goes something like this:

    -
      -
    • main.tf creates a deploy for the Redis server by rendering a local task definition and populating it with certain values
    • -
    • main.tf passes the ID of the deploy into the Redis module, along with other values the module requires
    • -
    • the Redis module pulls all the variables it knows about (both the defaults in variables.tf as well as the ones passed in)
    • -
    • among other Layer0/AWS resources, the module spins up a Redis service; since a deploy ID has been provided, it uses that deploy to create the service instead of a deploy made from a default task definition contained within the module
    • -
    -
    -

    Part 1: Terraform Get#

    -

    Run terraform get to pull down all the source materials Terraform needs for our deployment.

    -
    -

    Part 2: Terraform Plan#

    -

    As before, we can run terraform plan to see what's going to happen. -We should see that there are 12 new resources to be created:

    -
      -
    • the environment
    • -
    • the two local deploys which will be used for Guestbook and Redis
    • -
    • the load balancer, deploy, and service from each of the Consul, Guestbook, and Redis modules
        -
      • note that even though the default modules' deploys are created, they won't actually be used to deploy services
      • -
      -
    • -
    -
    -

    Part 3: Terraform Apply#

    -

    Run terraform apply, and we should see output similar to the following:

data.template_file.consul: Refreshing state...
layer0_deploy.consul-dpl: Creating...

...
...
...

layer0_service.guestbook-svc: Creation complete

Apply complete! Resources: 10 added, 0 changed, 0 destroyed.

The state of your infrastructure has been saved to the path
below. This state is required to modify and destroy your
infrastructure, so keep it safe. To inspect the complete state
use the `terraform show` command.

State path: terraform.tfstate

Outputs:

guestbook_url = <http endpoint for the guestbook application>

    Note


It may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.

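One simple way to tell when the load balancer is ready is to poll the guestbook URL from the Terraform outputs until it stops returning 503. The commands below are just one illustrative way to do that:

$ curl -s -o /dev/null -w "%{http_code}\n" "$(terraform output guestbook_url)"
503
$ curl -s -o /dev/null -w "%{http_code}\n" "$(terraform output guestbook_url)"
200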

    What's Happening#


Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment. Terraform also writes the state of your deployment to the terraform.tfstate file (creating a new one if it's not already there).
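
Because the state is stored locally, you can query it at any time. For example, terraform output re-prints the values from the Outputs section above, and terraform show dumps everything Terraform has recorded; the resource shown below is illustrative:

$ terraform output guestbook_url
<http endpoint for the guestbook application>
$ terraform show
layer0_environment.demo-env:
  ...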


    Cleanup#


When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment and terminate the application. Execute the following command (in the same directory):


    terraform destroy
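
Terraform asks for confirmation before tearing anything down, then reports what it removed. An abbreviated, illustrative run looks something like this:

$ terraform destroy
Do you really want to destroy?
  Terraform will delete all your managed infrastructure.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes
...
Destroy complete! Resources: <number of resources created above> destroyed.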


    It's also now safe to remove the .terraform/ directory and the *.tfstate* files.
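
For example, from the same directory (only once you're sure you no longer need the recorded state):

$ rm -rf .terraform/
$ rm terraform.tfstate terraform.tfstate.backup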

diff --git a/docs/guides/walkthrough/intro/index.html b/docs/guides/walkthrough/intro/index.html
index 8fc4f91ed..115f1ae31 100644

    An Iterative Walkthrough#



    This guide aims to take you through three increasingly-complex deployment examples using Layer0. Successive sections build upon the previous ones, and each deployment can be completed either through the Layer0 CLI directly, or through Terraform using our custom Layer0 Terraform Provider.

We assume that you're using Layer0 v0.9.0 or later.

Deployment 3: Deploying Guestbook, Redis, and a service discovery service (Consul)

diff --git a/docs/index.html b/docs/index.html
index af9114953..acc589301 100644

    Layer0 Logo


    Build, Manage, and Deploy Your Application#


    Meet Layer0#

    Layer0 is a framework that helps you deploy web applications to the cloud with minimal fuss. Using a simple command line interface (CLI), you can manage the entire life cycle of your application without having to focus on infrastructure.

    Ready to learn more about Layer0? See our introduction page to learn about some important concepts. When you're ready to get started, take a look at the installation page for information about setting up Layer0.


Download#

Download v0.10.4

Darwin   Linux   Windows
macOS    Linux   Windows

    Contact Us#

    If you have questions about Layer0, email the development team at carbon@us.imshealth.com.

diff --git a/docs/mkdocs/js/lunr.min.js b/docs/mkdocs/js/lunr.min.js
deleted file mode 100644
index b0198dff9..000000000
diff --git a/docs/mkdocs/js/search-results-template.mustache b/docs/mkdocs/js/search-results-template.mustache
deleted file mode 100644
index a8b3862f2..000000000
diff --git a/docs/mkdocs/js/search.js b/docs/mkdocs/js/search.js
deleted file mode 100644
index d5c866164..000000000
diff --git a/docs/mkdocs/js/text.js b/docs/mkdocs/js/text.js
deleted file mode 100644
index 17921b6e5..000000000
diff --git a/docs/mkdocs/search_index.json b/docs/mkdocs/search_index.json
deleted file mode 100644
index 9c7452d0f..000000000
--- a/docs/mkdocs/search_index.json
+++ /dev/null
@@ -1,1904 +0,0 @@
-{
-    "docs": [
-        {
-            "location": "/",
-            "text": "Build, Manage, and Deploy Your Application\n#\n\n\nMeet Layer0\n#\n\n\nLayer0 is a framework that helps you deploy web applications to the cloud with minimal fuss. Using a simple command line interface (CLI), you can manage the entire life cycle of your application without having to focus on infrastructure.\n\n\nReady to learn more about Layer0? See our \nintroduction page\n to learn about some important concepts.
When you're ready to get started, take a look at the \ninstallation page\n for information about setting up Layer0.\n\n\nDownload\n#\n\n\n\n\n\n\n\n\nDownload \nv0.10.3\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\n\n\nContact Us\n#\n\n\nIf you have questions about Layer0, email the development team at \ncarbon@us.imshealth.com\n.", - "title": "Home" - }, - { - "location": "/#build-manage-and-deploy-your-application", - "text": "", - "title": "Build, Manage, and Deploy Your Application" - }, - { - "location": "/#meet-layer0", - "text": "Layer0 is a framework that helps you deploy web applications to the cloud with minimal fuss. Using a simple command line interface (CLI), you can manage the entire life cycle of your application without having to focus on infrastructure. Ready to learn more about Layer0? See our introduction page to learn about some important concepts. When you're ready to get started, take a look at the installation page for information about setting up Layer0.", - "title": "Meet Layer0" - }, - { - "location": "/#download", - "text": "Download v0.10.3 macOS Linux Windows", - "title": "Download" - }, - { - "location": "/#contact-us", - "text": "If you have questions about Layer0, email the development team at carbon@us.imshealth.com .", - "title": "Contact Us" - }, - { - "location": "/releases/", - "text": "Version\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\n\n\n\n\nv0.10.3\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.2\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.1\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.0\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.9.0\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.8.4\n\n\nmacOS\n\n\nLinux\n\n\nWindows", - "title": "Releases" - }, - { - "location": "/intro/", - "text": "Layer0 Introduction\n#\n\n\nIn recent years, the process of deploying applications has seen incredible innovation. However, this innovation has taken a somewhat simple task and made it into something quite \ncomplicated\n. Cloud providers, load balancing, virtual servers, IP subnets, and a continuing list of technological considerations are not only required to be understood, but their creation and management must be automated for a modern application to be successful at scale.\n\n\nThe burden of understanding a complicated and ever-growing infrastructure is a large aspect of what Layer0 is trying to fix. We've already done the leg work for huge swathes of your backend infrastructure, and we've made it easy to tear down and start over again, too. Meanwhile, you can develop locally using \nDocker\n and be assured that your application will properly translate to the cloud when you're ready to deploy.\n\n\nLayer0 requires a solid understanding of Docker to get the most out of it. We highly recommend starting with \nDocker's Understanding the Architecture\n to learn more about using Docker locally and in the cloud. We also recommend the \nTwelve-Factor App\n primer, which is a critical resource for understanding how to build a microservice.\n\n\n\n\nLayer0 Concepts\n#\n\n\nThe following concepts are core Layer0 abstractions for the technologies and features we use \nbehind the scenes\n. These terms will be used throughout our guides, so having a general understanding of them is helpful.\n\n\nCertificates\n#\n\n\nSSL certificates obtained from a valid \nCertificate Authority (CA)\n. 
You can use these certificates to secure your HTTPS services by applying them to your Layer0 load balancers.\n\n\nDeploys\n#\n\n\nECS Task Definitions\n. These configuration files detail how to deploy your application. We have several \nsample applications\n available that show what these files look like --- they're called \nDockerrun.aws.json\n within each sample app.\n\n\nTasks\n#\n\n\nManual one-off commands that don't necessarily make sense to keep running, or to restart when they finish. These run using Amazon's \nRunTask\n action (more info \nhere\n), and are \"ideally suited for processes such as batch jobs that perform work and then stop.\"\n\n\nLoad Balancers\n#\n\n\nPowerful tools that give you the basic building blocks for high-availability, scaling, and HTTPS. We currently use Amazon's \nElastic Load Balancing\n, and it pays to understand the basics of this service when working with Layer0.\n\n\nServices\n#\n\n\nYour running Layer0 applications. We also use the term \nservice\n for tools such as Consul, for which we provide a pre-built \nsample implementation\n using Layer0.\n\n\nEnvironments\n#\n\n\nLogical groupings of services. Typically, you would make a single environment for each tier of your application, such as \ndev\n, \nstaging\n, and \nprod\n.", - "title": "Introduction" - }, - { - "location": "/intro/#layer0-introduction", - "text": "In recent years, the process of deploying applications has seen incredible innovation. However, this innovation has taken a somewhat simple task and made it into something quite complicated . Cloud providers, load balancing, virtual servers, IP subnets, and a continuing list of technological considerations are not only required to be understood, but their creation and management must be automated for a modern application to be successful at scale. The burden of understanding a complicated and ever-growing infrastructure is a large aspect of what Layer0 is trying to fix. We've already done the leg work for huge swathes of your backend infrastructure, and we've made it easy to tear down and start over again, too. Meanwhile, you can develop locally using Docker and be assured that your application will properly translate to the cloud when you're ready to deploy. Layer0 requires a solid understanding of Docker to get the most out of it. We highly recommend starting with Docker's Understanding the Architecture to learn more about using Docker locally and in the cloud. We also recommend the Twelve-Factor App primer, which is a critical resource for understanding how to build a microservice.", - "title": "Layer0 Introduction" - }, - { - "location": "/intro/#layer0-concepts", - "text": "The following concepts are core Layer0 abstractions for the technologies and features we use behind the scenes . These terms will be used throughout our guides, so having a general understanding of them is helpful.", - "title": "Layer0 Concepts" - }, - { - "location": "/intro/#certificates", - "text": "SSL certificates obtained from a valid Certificate Authority (CA) . You can use these certificates to secure your HTTPS services by applying them to your Layer0 load balancers.", - "title": "Certificates" - }, - { - "location": "/intro/#deploys", - "text": "ECS Task Definitions . These configuration files detail how to deploy your application. 
We have several sample applications available that show what these files look like --- they're called Dockerrun.aws.json within each sample app.", - "title": "Deploys" - }, - { - "location": "/intro/#tasks", - "text": "Manual one-off commands that don't necessarily make sense to keep running, or to restart when they finish. These run using Amazon's RunTask action (more info here ), and are \"ideally suited for processes such as batch jobs that perform work and then stop.\"", - "title": "Tasks" - }, - { - "location": "/intro/#load-balancers", - "text": "Powerful tools that give you the basic building blocks for high-availability, scaling, and HTTPS. We currently use Amazon's Elastic Load Balancing , and it pays to understand the basics of this service when working with Layer0.", - "title": "Load Balancers" - }, - { - "location": "/intro/#services", - "text": "Your running Layer0 applications. We also use the term service for tools such as Consul, for which we provide a pre-built sample implementation using Layer0.", - "title": "Services" - }, - { - "location": "/intro/#environments", - "text": "Logical groupings of services. Typically, you would make a single environment for each tier of your application, such as dev , staging , and prod .", - "title": "Environments" - }, - { - "location": "/setup/install/", - "text": "Create a new Layer0 Instance\n#\n\n\nPrerequisites\n#\n\n\nBefore you can install and configure Layer0, you must obtain the following:\n\n\n\n\n\n\nAn AWS account.\n\n\n\n\n\n\nAn EC2 Key Pair.\n\nThis key pair allows you to access the EC2 instances running your Services using SSH.\nIf you have already created a key pair, you can use it for this process.\nOtherwise, follow the \ninstructions at aws.amazon.com\n to create a new key pair.\nMake a note of the name that you selected when creating the key pair.\n\n\n\n\n\n\nTerraform v0.9.4+\n\nWe use Terraform to create the resources that Layer0 needs.\nIf you're unfamiliar with Terraform, you may want to check out our \nintroduction\n.\nIf you're ready to install Terraform, there are instructions in the \nTerraform documentation\n.\n\n\n\n\n\n\nPart 1: Download and extract Layer0\n#\n\n\n\n\nIn the \nDownloads section of the home page\n, select the appropriate installation file for your operating system. Extract the zip file to a directory on your computer.\n\n\n(Optional) Place the \nl0\n and \nl0-setup\n binaries into your system path. \nFor more information about adding directories to your system path, see the following resources:\n\n\n(Windows): \nHow to Edit Your System PATH for Easy Command Line Access in Windows\n\n\n(Linux/macOS): \nAdding a Directory to the Path\n\n\n\n\n\n\n\n\nPart 2: Create an Access Key\n#\n\n\nThis step will create an Identity \n Access Management (IAM) access key for your AWS account. \nYou will use the credentials created in this section when creating, updating, or removing Layer0 instances.\n\n\nTo create an Access Key:\n\n\n\n\n\n\nIn a web browser, login to the \nAWS Console\n.\n\n\n\n\n\n\nUnder \nSecurity and Identity\n, click \nIdentity and Access Management\n.\n\n\n\n\n\n\nClick \nGroups\n, and then click \nAdministrators\n. \nNote\nIf the \nAdministrators\n group does not already exist, complete the following steps: \nClick \nCreate New Group\n. 
Name the new group \"Administrators\", and then click \nNext Step\n.\nClick \nAdministratorAccess\n to attach the Administrator policy to your new group.\nClick \nNext Step\n, and then click \nCreate Group\n.\n\n\n\n\n\n\nClick \nUsers\n.\n\n\n\n\n\n\nClick \nCreate New Users\n and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to \nGenerate an Access Key for each user\n, and then click \nCreate\n.\n\n\n\n\n\n\nOnce your user account has been created, click \nDownload Credentials\n to save your access key to a CSV file.\n\n\n\n\n\n\nIn the Users list, click the user account you just created. Under \nUser Actions\n, click \nAdd User to Groups\n.\n\n\n\n\n\n\nSelect the group \nAdministrators\n and click \nAdd to Groups\n. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe!\n\n\n\n\n\n\nPart 3: Create a new Layer0 Instance\n#\n\n\nNow that you have downloaded Layer0 and configured your AWS account, you can create your Layer0 instance.\nFrom a command prompt, run the following (replacing \ninstance_name\n with a name for your Layer0 instance):\n\n\n$ l0-setup init \ninstance_name\n\n\n\n\nThis command will prompt you for many different inputs. \nEnter the required values for \nAWS Access Key\n, \nAWS Secret Key\n, and \nAWS SSH Key\n as they come up.\nAll remaining inputs are optional and can be set to their default by pressing enter.\n\n\n...\nAWS Access Key: The access_key input variable is used to provision the AWS resources\nrequired for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key.\nIt is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will\nonly use this key for 'l0-setup' commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: \nnone\n]\nPlease enter a value and press 'enter'.\n Input: ABC123xzy\n\nAWS Secret Key: The secret_key input variable is used to provision the AWS resources\nrequired for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key.\nIt is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will\nonly use this key for 'l0-setup' commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: \nnone\n]\nPlease enter a value and press 'enter'.\n Input: ZXY987cba\n\nAWS SSH Key Pair: The ssh_key_pair input variable specifies the name of the\nssh key pair to include in EC2 instances provisioned by Layer0. This key pair must\nalready exist in the AWS account. The names of existing key pairs can be found\nin the EC2 dashboard. Note that changing this value will not effect instances\nthat have already been provisioned.\n\n[current: \nnone\n]\nPlease enter a value and press 'enter'.\n Input: mySSHKey\n...\n\n\n\n\nOnce the \ninit\n command has successfully completed, you're ready to actually create the resources needed to use Layer0.\nRun the following command (again, replace \ninstance_name\n with the name you've chosen for your Layer0 instance):\n\n\nl0-setup apply \ninstance_name\n\n\n\n\nThe first time you run the \napply\n command, it may take around 5 minutes to complete. 
\nThis command is idempotent; it is safe to run multiple times if it fails the first.\n\n\nIt's a good idea to run the \npush\n command (\nl0-setup push \ninstance_name\n) after \napply\n commands complete. \nThis will send a backup of your Layer0 instance's configuration and state to S3. \nThese files can be grabbed later using the \npull\n command (\nl0-setup pull \ninstance_name\n). \n\n\n\n\nUsing a Private Docker Registry\n\n\nThe procedures in this section are optional, but are highly recommended for production use.\n\n\n\n\nIf you require authentication to a private Docker registry, you will need a Docker configuration file present on your machine with access to private repositories (typically located at \n~/.docker/config.json\n). \n\n\nIf you don't have a config file yet, you can generate one by running \ndocker login [registry-address]\n. \nA configuration file will be generated at \n~/.docker/config.json\n.\n\n\nTo add this authentication to your Layer0 instance, run:\n\n\n$ l0-setup init --docker-path=\npath/to/config.json\n \ninstance_name\n\n\n\n\nThis will add a rendered file into your Layer0 instance's directory at \n~/.layer0/\ninstance_name\n/dockercfg.json\n.\n\n\nYou can modify a Layer0 instance's \ndockercfg.json\n file and re-run the \napply\n command (\nl0-setup apply \ninstance_name\n) to make changes to your authentication. \nNote that any EC2 instances created prior to changing your \ndockercfg.json\n file will need to be manually terminated since they only grab the authentication file during instance creation. \nTerminated EC2 instances will be automatically re-created by autoscaling.\n\n\n\n\nUsing an Existing VPC\n\n\nThe procedures in this section must be followed to properly install Layer0 into an existing VPC\n\n\n\n\nBy default, l0-setup creates a new VPC to place resources. \nHowever, l0-setup can place resources in an existing VPC if it meets the following conditions:\n\n\n\n\nHas access to the public internet (through a NAT instance or gateway)\n\n\nHas at least 1 public and 1 private subnet\n\n\nThe public and private subnets have the tag \nTier: Public\n or \nTier: Private\n, respectively.\nFor information on how to tag AWS resources, please visit the \nAWS documentation\n. \n\n\n\n\nOnce you are sure the existing VPC satisfies these requirements, run the \ninit\n command, \nplacing the VPC ID when prompted:\n\n\n$ l0-setup init \ninstance_name\n\n...\nVPC ID (optional): The vpc_id input variable specifies an existing AWS VPC to provision\nthe AWS resources required for Layer0. If no input is specified, a new VPC will be\ncreated for you. Existing VPCs must satisfy the following constraints:\n\n - Have access to the public internet (through a NAT instance or gateway)\n - Have at least 1 public and 1 private subnet\n - Each subnet must be tagged with [\nTier\n: \nPrivate\n] or [\nTier\n: \nPublic\n]\n\nNote that changing this value will destroy and recreate any existing resources.\n\n[current: ]\nPlease enter a new value, or press 'enter' to keep the current value.\n Input: vpc123\n\n\n\n\nOnce the command has completed, it is safe to run \napply\n to provision the resources. 
\n\n\nPart 4: Connect to a Layer0 Instance\n#\n\n\nOnce the \napply\n command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the \nendpoint\n command.\n\n\n$ l0-setup endpoint --insecure \ninstance_name\n\nexport LAYER0_API_ENDPOINT=\nhttps://l0-instance_name-api-123456.us-west-2.elb.amazonaws.com\n\nexport LAYER0_AUTH_TOKEN=\nabcDEFG123\n\nexport LAYER0_SKIP_SSL_VERIFY=\n1\n\nexport LAYER0_SKIP_VERSION_VERIFY=\n1\n\n\n\n\nThe \n--insecure\n flag shows configurations that bypass SSL and version verifications. \nThis is required as the Layer0 API created uses a self-signed certificate by default.\nThese settings are \nnot\n recommended for production use!\n\n\nThe \nendpoint\n command supports a \n--syntax\n option, which can be used to turn configuration into a single line:\n\n\n\n\nBash (default) - \n$ eval \"$(l0-setup endpoint --insecure \ninstance_name\n)\"\n\n\nPowershell - \n$ l0-setup endpoint --insecure --syntax=powershell \ninstance_name\n | Out-String | Invoke-Expression", - "title": "Install" - }, - { - "location": "/setup/install/#create-a-new-layer0-instance", - "text": "", - "title": "Create a new Layer0 Instance" - }, - { - "location": "/setup/install/#prerequisites", - "text": "Before you can install and configure Layer0, you must obtain the following: An AWS account. An EC2 Key Pair. \nThis key pair allows you to access the EC2 instances running your Services using SSH.\nIf you have already created a key pair, you can use it for this process.\nOtherwise, follow the instructions at aws.amazon.com to create a new key pair.\nMake a note of the name that you selected when creating the key pair. Terraform v0.9.4+ \nWe use Terraform to create the resources that Layer0 needs.\nIf you're unfamiliar with Terraform, you may want to check out our introduction .\nIf you're ready to install Terraform, there are instructions in the Terraform documentation .", - "title": "Prerequisites" - }, - { - "location": "/setup/install/#part-1-download-and-extract-layer0", - "text": "In the Downloads section of the home page , select the appropriate installation file for your operating system. Extract the zip file to a directory on your computer. (Optional) Place the l0 and l0-setup binaries into your system path. \nFor more information about adding directories to your system path, see the following resources: (Windows): How to Edit Your System PATH for Easy Command Line Access in Windows (Linux/macOS): Adding a Directory to the Path", - "title": "Part 1: Download and extract Layer0" - }, - { - "location": "/setup/install/#part-2-create-an-access-key", - "text": "This step will create an Identity Access Management (IAM) access key for your AWS account. \nYou will use the credentials created in this section when creating, updating, or removing Layer0 instances. To create an Access Key: In a web browser, login to the AWS Console . Under Security and Identity , click Identity and Access Management . Click Groups , and then click Administrators . Note If the Administrators group does not already exist, complete the following steps: Click Create New Group . Name the new group \"Administrators\", and then click Next Step . Click AdministratorAccess to attach the Administrator policy to your new group. Click Next Step , and then click Create Group . Click Users . Click Create New Users and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. 
Check the box next to Generate an Access Key for each user , and then click Create . Once your user account has been created, click Download Credentials to save your access key to a CSV file. In the Users list, click the user account you just created. Under User Actions , click Add User to Groups . Select the group Administrators and click Add to Groups . This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe!", - "title": "Part 2: Create an Access Key" - }, - { - "location": "/setup/install/#part-3-create-a-new-layer0-instance", - "text": "Now that you have downloaded Layer0 and configured your AWS account, you can create your Layer0 instance.\nFrom a command prompt, run the following (replacing instance_name with a name for your Layer0 instance): $ l0-setup init instance_name This command will prompt you for many different inputs. \nEnter the required values for AWS Access Key , AWS Secret Key , and AWS SSH Key as they come up.\nAll remaining inputs are optional and can be set to their default by pressing enter. ...\nAWS Access Key: The access_key input variable is used to provision the AWS resources\nrequired for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key.\nIt is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will\nonly use this key for 'l0-setup' commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: none ]\nPlease enter a value and press 'enter'.\n Input: ABC123xzy\n\nAWS Secret Key: The secret_key input variable is used to provision the AWS resources\nrequired for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key.\nIt is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will\nonly use this key for 'l0-setup' commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: none ]\nPlease enter a value and press 'enter'.\n Input: ZXY987cba\n\nAWS SSH Key Pair: The ssh_key_pair input variable specifies the name of the\nssh key pair to include in EC2 instances provisioned by Layer0. This key pair must\nalready exist in the AWS account. The names of existing key pairs can be found\nin the EC2 dashboard. Note that changing this value will not effect instances\nthat have already been provisioned.\n\n[current: none ]\nPlease enter a value and press 'enter'.\n Input: mySSHKey\n... Once the init command has successfully completed, you're ready to actually create the resources needed to use Layer0.\nRun the following command (again, replace instance_name with the name you've chosen for your Layer0 instance): l0-setup apply instance_name The first time you run the apply command, it may take around 5 minutes to complete. \nThis command is idempotent; it is safe to run multiple times if it fails the first. It's a good idea to run the push command ( l0-setup push instance_name ) after apply commands complete. \nThis will send a backup of your Layer0 instance's configuration and state to S3. \nThese files can be grabbed later using the pull command ( l0-setup pull instance_name ). Using a Private Docker Registry The procedures in this section are optional, but are highly recommended for production use. 
If you require authentication to a private Docker registry, you will need a Docker configuration file present on your machine with access to private repositories (typically located at ~/.docker/config.json ). If you don't have a config file yet, you can generate one by running docker login [registry-address] . \nA configuration file will be generated at ~/.docker/config.json . To add this authentication to your Layer0 instance, run: $ l0-setup init --docker-path= path/to/config.json instance_name This will add a rendered file into your Layer0 instance's directory at ~/.layer0/ instance_name /dockercfg.json . You can modify a Layer0 instance's dockercfg.json file and re-run the apply command ( l0-setup apply instance_name ) to make changes to your authentication. \nNote that any EC2 instances created prior to changing your dockercfg.json file will need to be manually terminated since they only grab the authentication file during instance creation. \nTerminated EC2 instances will be automatically re-created by autoscaling. Using an Existing VPC The procedures in this section must be followed to properly install Layer0 into an existing VPC By default, l0-setup creates a new VPC to place resources. \nHowever, l0-setup can place resources in an existing VPC if it meets the following conditions: Has access to the public internet (through a NAT instance or gateway) Has at least 1 public and 1 private subnet The public and private subnets have the tag Tier: Public or Tier: Private , respectively.\nFor information on how to tag AWS resources, please visit the AWS documentation . Once you are sure the existing VPC satisfies these requirements, run the init command, \nplacing the VPC ID when prompted: $ l0-setup init instance_name \n...\nVPC ID (optional): The vpc_id input variable specifies an existing AWS VPC to provision\nthe AWS resources required for Layer0. If no input is specified, a new VPC will be\ncreated for you. Existing VPCs must satisfy the following constraints:\n\n - Have access to the public internet (through a NAT instance or gateway)\n - Have at least 1 public and 1 private subnet\n - Each subnet must be tagged with [ Tier : Private ] or [ Tier : Public ]\n\nNote that changing this value will destroy and recreate any existing resources.\n\n[current: ]\nPlease enter a new value, or press 'enter' to keep the current value.\n Input: vpc123 Once the command has completed, it is safe to run apply to provision the resources.", - "title": "Part 3: Create a new Layer0 Instance" - }, - { - "location": "/setup/install/#part-4-connect-to-a-layer0-instance", - "text": "Once the apply command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the endpoint command. $ l0-setup endpoint --insecure instance_name \nexport LAYER0_API_ENDPOINT= https://l0-instance_name-api-123456.us-west-2.elb.amazonaws.com \nexport LAYER0_AUTH_TOKEN= abcDEFG123 \nexport LAYER0_SKIP_SSL_VERIFY= 1 \nexport LAYER0_SKIP_VERSION_VERIFY= 1 The --insecure flag shows configurations that bypass SSL and version verifications. \nThis is required as the Layer0 API created uses a self-signed certificate by default.\nThese settings are not recommended for production use! 
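Once these variables are exported into your shell (for example by eval-ing the endpoint command, as shown below), any read-only command works as a quick connectivity check; this is only a sketch, and the listing will reflect your own instance:
$ l0 environment list
ENVIRONMENT ID ENVIRONMENT NAME
api api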
The endpoint command supports a --syntax option, which can be used to turn configuration into a single line: Bash (default) - $ eval \"$(l0-setup endpoint --insecure instance_name )\" Powershell - $ l0-setup endpoint --insecure --syntax=powershell instance_name | Out-String | Invoke-Expression", - "title": "Part 4: Connect to a Layer0 Instance" - }, - { - "location": "/setup/upgrade/", - "text": "Upgrade a Layer0 Instance\n#\n\n\nThis section provides procedures for upgrading your Layer0 installation to the latest version.\nThis assumes you are using Layer0 version \nv0.10.0\n or later. \n\n\n\n\nNote\n\n\nLayer0 does not support updating MAJOR or MINOR versions in place unless explicitly stated otherwise.\nUsers will need to destroy and re-create Layer0 instances in these circumstances. \n\n\n\n\nRun the \nupgrade\n command, replacing \ninstance_name\n and \nversion\n with the name of the Layer0 instance and new version, respectively:\n\n\n$ l0-setup upgrade \ninstance_name\n \nversion\n\n\n\n\nThis will prompt you about the updated \nsource\n and \nversion\n inputs changing. \nIf you are not satisfied with the changes, exit the application during the prompts. \nFor full control on changing inputs, please use the \nset\n command. \n\n\nExample Usage\n\n\n$ l0-setup upgrade mylayer0 v0.10.1\nThis will update the 'version' input\n From: [v0.10.0]\n To: [v0.10.1]\n\n Press 'enter' to accept this change:\nThis will update the 'source' input\n From: [github.com/quintilesims/layer0//setup/module?ref=v0.10.0]\n To: [github.com/quintilesims/layer0//setup/module?ref=v0.10.1]\n\n Press 'enter' to accept this change:\n ...\n\nEverything looks good! You are now ready to run 'l0-setup apply mylayer0'\n\n\n\n\nAs stated by the command output, run the \napply\n command to apply the changes to the Layer0 instance.\nIf any errors occur, please contact the Layer0 team.", - "title": "Upgrade" - }, - { - "location": "/setup/upgrade/#upgrade-a-layer0-instance", - "text": "This section provides procedures for upgrading your Layer0 installation to the latest version.\nThis assumes you are using Layer0 version v0.10.0 or later. Note Layer0 does not support updating MAJOR or MINOR versions in place unless explicitly stated otherwise.\nUsers will need to destroy and re-create Layer0 instances in these circumstances. Run the upgrade command, replacing instance_name and version with the name of the Layer0 instance and new version, respectively: $ l0-setup upgrade instance_name version This will prompt you about the updated source and version inputs changing. \nIf you are not satisfied with the changes, exit the application during the prompts. \nFor full control on changing inputs, please use the set command. Example Usage $ l0-setup upgrade mylayer0 v0.10.1\nThis will update the 'version' input\n From: [v0.10.0]\n To: [v0.10.1]\n\n Press 'enter' to accept this change:\nThis will update the 'source' input\n From: [github.com/quintilesims/layer0//setup/module?ref=v0.10.0]\n To: [github.com/quintilesims/layer0//setup/module?ref=v0.10.1]\n\n Press 'enter' to accept this change:\n ...\n\nEverything looks good! 
You are now ready to run 'l0-setup apply mylayer0' As stated by the command output, run the apply command to apply the changes to the Layer0 instance.\nIf any errors occur, please contact the Layer0 team.", - "title": "Upgrade a Layer0 Instance" - }, - { - "location": "/setup/destroy/", - "text": "Destroying a Layer0 Instance\n#\n\n\nThis section provides procedures for destroying (deleting) a Layer0 instance.\n\n\nPart 1: Clean Up Your Layer0 Environments\n#\n\n\nIn order to destroy a Layer0 instance, you must first delete all environments in the instance.\nList all environments with:\n\n\n$ l0 environment list\n\n\n\n\nFor each environment listed in the previous step, with the exception of the environment named \napi\n, \nissue the following command (replacing \nenvironment_name\n with the name of the environment to delete):\n\n\nl0 environment delete --wait \nenvironment_name\n\n\n\n\nPart 2: Destroy the Layer0 Instance\n#\n\n\nOnce all environments have been deleted, the Layer0 instance can be deleted using the \nl0-setup\n tool. \nRun the following command (replacing \ninstance_name\n with the name of the Layer0 instance):\n\n\n$ l0-setup destroy \ninstance_name\n\n\n\n\nThe \ndestroy\n command is idempotent; if it fails, it is safe to re-attempt multiple times. \nIf the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources \nl0-setup\n is attempting to destroy. \nYou will need to manually remove these dependencies in order to get the \ndestroy\n command to complete successfully.", - "title": "Destroy" - }, - { - "location": "/setup/destroy/#destroying-a-layer0-instance", - "text": "This section provides procedures for destroying (deleting) a Layer0 instance.", - "title": "Destroying a Layer0 Instance" - }, - { - "location": "/setup/destroy/#part-1-clean-up-your-layer0-environments", - "text": "In order to destroy a Layer0 instance, you must first delete all environments in the instance.\nList all environments with: $ l0 environment list For each environment listed in the previous step, with the exception of the environment named api , \nissue the following command (replacing environment_name with the name of the environment to delete): l0 environment delete --wait environment_name", - "title": "Part 1: Clean Up Your Layer0 Environments" - }, - { - "location": "/setup/destroy/#part-2-destroy-the-layer0-instance", - "text": "Once all environments have been deleted, the Layer0 instance can be deleted using the l0-setup tool. \nRun the following command (replacing instance_name with the name of the Layer0 instance): $ l0-setup destroy instance_name The destroy command is idempotent; if it fails, it is safe to re-attempt multiple times. \nIf the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources l0-setup is attempting to destroy. 
\nYou will need to manually remove these dependencies in order to get the destroy command to complete successfully.", - "title": "Part 2: Destroy the Layer0 Instance" - }, - { - "location": "/guides/walkthrough/intro/", - "text": "An Iterative Walkthrough\n#\n\n\nThis guide aims to take you through three increasingly-complex deployment examples using Layer0.\nSuccessive sections build upon the previous ones, and each deployment can be completed either through the Layer0 CLI directly, or through Terraform using our custom \nLayer0 Terraform Provider\n.\n\n\nWe assume that you're using Layer0 v0.9.0 or later.\nIf you have not already installed and configured Layer0, see the \ninstallation guide\n.\nIf you are running an older version of Layer0, you may need to \nupgrade\n.\n\n\nIf you intend to deploy services using the Layer0 Terraform Provider, you'll want to make sure that you've \ninstalled\n the provider correctly.\n\n\nRegardless of the deployment method you choose, we maintain a \nguides repository\n that you should clone/download.\nIt contains all the files you will need to progress through this walkthrough.\nAs you do so, we will assume that your working directory matches the part of the guide that you're following (for example, Deployment 1 of this guide will assume that your working directory is \n.../walkthrough/deployment-1/\n).\n\n\nTable of Contents\n:\n\n\n\n\nDeployment 1\n: Deploying a web service (Guestbook)\n\n\nDeployment 2\n: Deploying Guestbook and a data store service (Redis)\n\n\nDeployment 3\n: Deploying Guestbook, Redis, and a service discovery service (Consul)", - "title": "Walkthrough: Introduction" - }, - { - "location": "/guides/walkthrough/intro/#an-iterative-walkthrough", - "text": "This guide aims to take you through three increasingly-complex deployment examples using Layer0.\nSuccessive sections build upon the previous ones, and each deployment can be completed either through the Layer0 CLI directly, or through Terraform using our custom Layer0 Terraform Provider . We assume that you're using Layer0 v0.9.0 or later.\nIf you have not already installed and configured Layer0, see the installation guide .\nIf you are running an older version of Layer0, you may need to upgrade . If you intend to deploy services using the Layer0 Terraform Provider, you'll want to make sure that you've installed the provider correctly. Regardless of the deployment method you choose, we maintain a guides repository that you should clone/download.\nIt contains all the files you will need to progress through this walkthrough.\nAs you do so, we will assume that your working directory matches the part of the guide that you're following (for example, Deployment 1 of this guide will assume that your working directory is .../walkthrough/deployment-1/ ). 
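For example, assuming git is installed, cloning the guides repository and moving into the directory used by Deployment 1 looks like this (a sketch; adjust the paths to your own layout):
$ git clone https://github.com/quintilesims/guides.git
$ cd guides/walkthrough/deployment-1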
Table of Contents : Deployment 1 : Deploying a web service (Guestbook) Deployment 2 : Deploying Guestbook and a data store service (Redis) Deployment 3 : Deploying Guestbook, Redis, and a service discovery service (Consul)", - "title": "An Iterative Walkthrough" - }, - { - "location": "/guides/walkthrough/deployment-1/", - "text": "Deployment 1: A Simple Guestbook App\n#\n\n\nIn this section you'll learn how different Layer0 commands work together to deploy applications to the cloud.\nThe example application in this section is a guestbook -- a web application that acts as a simple message board.\nYou can choose to complete this section using either \nthe Layer0 CLI\n or \nTerraform\n.\n\n\n\n\nDeploy with Layer0 CLI\n#\n\n\nIf you're following along, you'll want to be working in the \nwalkthrough/deployment-1/\n directory of your clone of the \nguides\n repo.\n\n\nFiles used in this deployment:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nGuestbook.Dockerrun.aws.json\n\n\nTemplate for running the Guestbook application\n\n\n\n\n\n\n\n\n\n\nPart 1: Create the Environment\n#\n\n\nThe first step in deploying an application with Layer0 is to create an environment.\nAn environment is a dedicated space in which one or more services can reside.\nHere, we'll create a new environment named \ndemo-env\n.\nAt the command prompt, execute the following:\n\n\nl0 environment create demo-env\n\n\nWe should see output like the following:\n\n\nENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\n\n\n\n\nWe can inspect our environments in a couple of different ways:\n\n\n\n\nl0 environment list\n will give us a brief summary of all environments:\n\n\n\n\nENVIRONMENT ID ENVIRONMENT NAME\ndemo00e6aa9 demo-env\napi api\n\n\n\n\n\n\nl0 environment get demo-env\n will show us more information about the \ndemo-env\n environment we just created:\n\n\n\n\nENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\n\n\n\n\n\n\nl0 environment get \\*\n illustrates wildcard matching (you could also have used \ndemo*\n in the above command), and it will return detailed information for \neach\n environment, not just one - it's like a detailed \nlist\n:\n\n\n\n\nENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\napi api 2 m3.medium\n\n\n\n\n\n\nPart 2: Create the Load Balancer\n#\n\n\nIn order to expose a web application to the public internet, we need to create a load balancer.\nA load balancer listens for web traffic at a specific address and directs that traffic to a Layer0 service.\n\n\nA load balancer also has a notion of a health check - a way to assess whether or not the service is healthy and running properly.\nBy default, Layer0 configures the health check of a load balancer based upon a simple TCP ping to port 80 every thirty seconds.\nAlso by default, this ping will timeout after five seconds of no response from the service, and two consecutive successes or failures are required for the service to be considered healthy or unhealthy.\n\n\nHere, we'll create a new load balancer named \nguestbook-lb\n inside of our environment named \ndemo-env\n.\nThe load balancer will listen on port 80, and forward that traffic along to port 80 in the Docker container using the HTTP protocol.\nSince the port configuration is already aligned with the default health check, we don't need to specify any health check configuration when we create this load balancer.\nAt the command 
prompt, execute the following:\n\n\nl0 loadbalancer create --port 80:80/http demo-env guestbook-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env 80:80/HTTP true\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nloadbalancer create\n: creates a new load balancer\n\n\n--port 80:80/HTTP\n: instructs the load balancer to forward requests from port 80 on the load balancer to port 80 in the EC2 instance using the HTTP protocol\n\n\ndemo-env\n: the name of the environment in which you are creating the load balancer\n\n\nguestbook-lb\n: a name for the load balancer itself\n\n\n\n\nYou can inspect load balancers in the same way that you inspected environments in Part 1.\nTry running the following commands to get an idea of the information available to you:\n\n\n\n\nl0 loadbalancer list\n\n\nl0 loadbalancer get guestbook-lb\n\n\nl0 loadbalancer get gues*\n\n\nl0 loadbalancer get \\*\n\n\n\n\n\n\nNote\n\n\nNotice that the load balancer \nlist\n and \nget\n outputs list an \nENVIRONMENT\n field - if you ever have load balancers (or other Layer0 entities) with the same name but in different environments, you can target a specific load balancer by qualifying it with its environment name:\n\n\n`l0 loadbalancer get demo-env:guestbook-lb`\n\n\n\n\n\n\nPart 3: Deploy the ECS Task Definition\n#\n\n\nThe \ndeploy\n command is used to specify the ECS task definition that outlines a web application.\nA deploy, once created, can be applied to multiple services - even across different environments!\n\n\nHere, we'll create a new deploy called \nguestbook-dpl\n that refers to the \nGuestbook.Dockerrun.aws.json\n file found in the guides reposiory.\nAt the command prompt, execute the following:\n\n\nl0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.1 guestbook-dpl 1\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\ndeploy create\n: creates a new deployment and allows you to specify an ECS task definition\n\n\nGuestbook.Dockerrun.aws.json\n: the file name of the ECS task definition (use the full path of the file if it is not in your current working directory)\n\n\nguestbook-dpl\n: a name for the deploy, which you will use later when you create the service\n\n\n\n\n\n\nDeploy Versioning\n\n\nThe \nDEPLOY NAME\n and \nVERSION\n are combined to create a unique identifier for a deploy.\nIf you create additional deploys named \nguestbook-dpl\n, they will be assigned different version numbers.\n\n\nYou can always specify the latest version when targeting a deploy by using \ndeploy name\n:latest\n -- for example, \nguestbook-dpl:latest\n.\n\n\n\n\nDeploys support the same methods of inspection as environments and load balancers:\n\n\n\n\nl0 deploy list\n\n\nl0 deploy get guestbook*\n\n\nl0 deploy get guestbook:1\n\n\nl0 deploy get guestbook:latest\n\n\nl0 deploy get \\*\n\n\n\n\n\n\nPart 4: Create the Service\n#\n\n\nThe final stage of the deployment process involves using the \nservice\n command to create a new service and associate it with the environment, load balancer, and deploy that we created in the previous sections.\nThe service will execute the Docker containers which have been described in the deploy.\n\n\nHere, we'll create a new service called \nguestbook-svc\n. 
At the command prompt, execute the following:\n\n\nl0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest\n\n\nWe should see output like the following:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nservice create\n: creates a new service\n\n\n--loadbalancer demo-env:guestbook-lb\n: the fully-qualified name of the load balancer; in this case, the load balancer named \nguestbook-lb\n in the environment named \ndemo-env\n. \n\n\n(It is not strictly necessary to use the fully qualified name of the load balancer, unless another load balancer with exactly the same name exists in a different environment.)\n\n\n\n\n\n\ndemo-env\n: the name of the environment you created in Part 1\n\n\nguestbook-svc\n: a name for the service you are creating\n\n\nguestbook-dpl\n: the name of the deploy that you created in Part 3\n\n\n\n\nLayer0 services can be queried using the same \nget\n and \nlist\n commands that we've come to expect by now.\n\n\n\n\nCheck the Status of the Service\n#\n\n\nAfter a service has been created, it may take several minutes for that service to completely finish deploying.\nA service's status may be checked by using the \nservice get\n command.\n\n\nLet's take a peek at our \nguestbook-svc\n service.\nAt the command prompt, execute the following:\n\n\nl0 service get demo-env:guestbook-svc\n\n\nIf we're quick enough, we'll be able to see the first stage of the process (this is what was output after running the \nservice create\n command up in Part 4).\nWe should see an asterisk (*) next to the name of the \nguestbook-dpl:1\n deploy, which indicates that the service is in a transitional state:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1\n\n\n\n\nIn the next phase of deployment, if we execute the \nservice get\n command again, we will see \n(1)\n in the \nScale\n column; this indicates that 1 copy of the service is transitioning to an active state:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 (1)\n\n\n\n\nIn the final phase of deployment, we will see \n1/1\n in the \nScale\n column; this indicates that the service is running 1 copy:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1 1/1\n\n\n\n\n\n\nGet the Application's URL\n#\n\n\nOnce the service has been completely deployed, we can obtain the URL for the application and launch it in a browser.\n\n\nAt the command prompt, execute the following:\n\n\nl0 loadbalancer get demo-env:guestbook-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true \nurl\n\n\n\n\nCopy the value shown in the \nURL\n column and paste it into a web browser.\nThe guestbook application will appear (once the service has completely finished deploying).\n\n\n\n\nLogs\n#\n\n\nOutput from a Service's docker containers may be acquired by running the following command:\n\n\nl0 service logs \nSERVICE\n\n\n\n\n\n\nCleanup\n#\n\n\nIf you're finished with the example and don't want to continue with this walkthrough, you can instruct 
Layer0 to delete the environment and terminate the application.\n\n\nl0 environment delete demo-env\n\n\nHowever, if you intend to continue through \nDeployment 2\n, you will want to keep the resources you made in this section.\n\n\n\n\nDeploy with Terraform\n#\n\n\nInstead of using the Layer0 CLI directly, you can instead use our Terraform provider, and deploy using Terraform \n(\nlearn more\n)\n.\nYou can use Terraform with Layer0 and AWS to create \"fire-and-forget\" deployments for your applications.\n\n\nIf you're following along, you'll want to be working in the \nwalkthrough/deployment-1/\n directory of your clone of the \nguides\n repo.\n\n\nWe use these files to set up a Layer0 environment with Terraform:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nmain.tf\n\n\nProvisions resources; populates resources in template files\n\n\n\n\n\n\noutputs.tf\n\n\nValues that Terraform will yield during deployment\n\n\n\n\n\n\nterraform.tfstate\n\n\nTracks status of deployment \n(created and managed by Terraform)\n\n\n\n\n\n\nterraform.tfvars\n\n\nVariables specific to the environment and application(s)\n\n\n\n\n\n\nvariables.tf\n\n\nValues that Terraform will use during deployment\n\n\n\n\n\n\n\n\n*.tf\n: A Brief Aside\n#\n\n\nLet's take a moment to discuss the \n.tf\n files.\nThe names of these files (and even the fact that they are separated out into multiple files at all) are completely arbitrary and exist soley for human-readability.\nTerraform understands all \n.tf\n files in a directory all together.\n\n\nIn \nvariables.tf\n, you'll see \n\"endpoint\"\n and \n\"token\"\n variables.\n\n\nIn \noutputs.tf\n, you'll see that Terraform should spit out the url of the guestbook's load balancer once deployment has finished.\n\n\nIn \nmain.tf\n, you'll see the bulk of the deployment process.\nIf you've followed along with the Layer0 CLI deployment above, it should be fairly easy to see how blocks in this file map to steps in the CLI process.\nWhen we began the CLI deployment, our first step was to create an environment:\n\n\nl0 environment create demo-env\n\n\nThis command is recreated in \nmain.tf\n like so:\n\n\n# walkthrough/deployment-1/main.tf\n\nresource \nlayer0_environment\n \ndemo-env\n {\n name = \ndemo-env\n\n}\n\n\n\n\nWe've bundled up the heart of the Guestbook deployment (load balancer, deploy, service, etc.) 
into a \nTerraform module\n.\nTo use it, we declare a \nmodule\n block and pass in the source of the module as well as any configuration or variables that the module needs.\n\n\n# walkthrough/deployment-1/main.tf\n\nmodule \nguestbook\n {\n source = \ngithub.com/quintilesims/guides//guestbook/module\n\n environment_id = \n${layer0_environment.demo.id}\n\n}\n\n\n\n\nYou can see that we pass in the ID of the environment we create.\nAll variables declared in this block are passed to the module, so the next file we should look at is \nvariables.tf\n inside of the module to get an idea of what the module is expecting.\n\n\nThere are a lot of variables here, but only one of them doesn't have a default value.\n\n\n# guestbook/module/variables.tf\n\nvariable \nenvironment_id\n {\n description = \nid of the layer0 environment in which to create resources\n\n}\n\n\n\n\nYou'll notice that this is the variable that we're passing in.\nFor this particular deployment of the Guestbook, all of the default options are fine.\nWe could override any of them if we wanted to, just by specifying a new value for them back in \ndeployment-1/main.tf\n.\n\n\nNow that we've seen the variables that the module will have, let's take a look at part of \nmodule/main.tf\n and see how some of them might be used:\n\n\n# guestbook/module/main.tf\n\nresource \nlayer0_load_balancer\n \nguestbook-lb\n {\n name = \n${var.load_balancer_name}\n\n environment = \n${var.environment_id}\n\n port {\n host_port = 80\n container_port = 80\n protocol = \nhttp\n\n }\n}\n\n...\n\n\n\n\nYou can follow \nthis link\n to learn more about Layer0 resources in Terraform.\n\n\n\n\nPart 1: Terraform Get\n#\n\n\nThis deployment uses modules, so we'll need to fetch those source materials.\nAt the command prompt, execute the following command:\n\n\nterraform get\n\n\nWe should see output like the following:\n\n\nGet: git::https://github.com/quintilesims/guides.git\n\n\n\n\nWe should now have a new local directory called \n.terraform/\n.\nWe don't need to do anything with it; we just want to make sure it's there.\n\n\n\n\nPart 2: Terraform Plan\n#\n\n\nBefore we actually create/update/delete any resources, it's a good idea to find out what Terraform intends to do.\n\n\nRun \nterraform plan\n. Terraform will prompt you for configuration values that it does not have:\n\n\nvar.endpoint\n Enter a value:\n\nvar.token\n Enter a value:\n\n\n\n\nYou can find these values by running \nl0-setup endpoint \nyour layer0 prefix\n.\n\n\n\n\nNote\n\n\nThere are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the \nterraform.tfvars\n file, or exporting evironment variables like \nTF_VAR_endpoint\n and \nTF_VAR_token\n, for example). See the \nTerraform Docs\n for more.\n\n\n\n\nThe \nplan\n command should give us output like the following:\n\n\nRefreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.guestbook: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. 
Cyan entries are data sources to be read.\n\nNote: You didn't specify an \n-out\n parameter to save this plan, so when\n\napply\n is called, Terraform can't guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: \ncomputed\n\n cluster_count: \ncomputed\n\n links: \ncomputed\n\n name: \ndemo\n\n os: \nlinux\n\n security_group_id: \ncomputed\n\n size: \nm3.medium\n\n\n+ module.guestbook.layer0_deploy.guestbook\n content: \n{\\n \\\nAWSEBDockerrunVersion\\\n: 2,\\n \\\ncontainerDefinitions\\\n: [\\n {\\n \\\nname\\\n: \\\nguestbook\\\n,\\n \\\nimage\\\n: \\\nquintilesims/guestbook\\\n,\\n \\\nessential\\\n: true,\\n \\\nmemory\\\n: 128,\\n \\\nenvironment\\\n: [\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_TYPE\\\n,\\n \\\nvalue\\\n: \\\nmemory\\\n\\n },\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_CONFIG\\\n,\\n \\\nvalue\\\n: \\\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_ACCESS_KEY_ID\\\n,\\n \\\nvalue\\\n: \\\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_SECRET_ACCESS_KEY\\\n,\\n \\\nvalue\\\n: \\\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_REGION\\\n,\\n \\\nvalue\\\n: \\\nus-west-2\\\n\\n }\\n ],\\n \\\nportMappings\\\n: [\\n {\\n \\\nhostPort\\\n: 80,\\n \\\ncontainerPort\\\n: 80\\n }\\n ]\\n }\\n ]\\n}\\n\n\n name: \nguestbook\n\n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: \n${var.environment_id}\n\n health_check.#: \ncomputed\n\n name: \nguestbook\n\n port.#: \n1\n\n port.2027667003.certificate: \n\n port.2027667003.container_port: \n80\n\n port.2027667003.host_port: \n80\n\n port.2027667003.protocol: \nhttp\n\n url: \ncomputed\n\n\n+ module.guestbook.layer0_service.guestbook\n deploy: \n${ var.deploy_id == \\\n\\\n ? layer0_deploy.guestbook.id : var.deploy_id }\n\n environment: \n${var.environment_id}\n\n load_balancer: \n${layer0_load_balancer.guestbook.id}\n\n name: \nguestbook\n\n scale: \n1\n\n wait: \ntrue\n\n\n\nPlan: 4 to add, 0 to change, 0 to destroy.\n\n\n\n\nThis shows you that Terraform intends to create a deploy, an environment, a load balancer, and a service, all through Layer0.\n\n\nIf you've gone through this deployment using the \nLayer0 CLI\n, you may notice that these resources appear out of order - that's fine. Terraform presents these resources in alphabetical order, but underneath, it knows the correct order in which to create them.\n\n\nOnce we're satisfied that Terraform will do what we want it to do, we can move on to actually making these things exist!\n\n\n\n\nPart 3: Terraform Apply\n#\n\n\nRun \nterraform apply\n to begin the process.\n\n\nWe should see output like the following:\n\n\nlayer0_environment.demo: Refreshing state...\n...\n...\n...\nlayer0_service.guestbook: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = \nhttp endpoint for the sample application\n\n\n\n\n\n\nNote\n\n\nIt may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.\n\n\n\n\nWhat's Happening\n#\n\n\nTerraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment. 
Terraform also writes the state of your deployment to the \nterraform.tfstate\n file (creating a new one if it's not already there).\n\n\nCleanup\n#\n\n\nWhen you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application. Execute the following command (in the same directory):\n\n\nterraform destroy\n\n\nIt's also now safe to remove the \n.terraform/\n directory and the \n*.tfstate*\n files.", - "title": "Walkthrough: Deployment 1" - }, - { - "location": "/guides/walkthrough/deployment-1/#deployment-1-a-simple-guestbook-app", - "text": "In this section you'll learn how different Layer0 commands work together to deploy applications to the cloud.\nThe example application in this section is a guestbook -- a web application that acts as a simple message board.\nYou can choose to complete this section using either the Layer0 CLI or Terraform .", - "title": "Deployment 1: A Simple Guestbook App" - }, - { - "location": "/guides/walkthrough/deployment-1/#deploy-with-layer0-cli", - "text": "If you're following along, you'll want to be working in the walkthrough/deployment-1/ directory of your clone of the guides repo. Files used in this deployment: Filename Purpose Guestbook.Dockerrun.aws.json Template for running the Guestbook application", - "title": "Deploy with Layer0 CLI" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-1-create-the-environment", - "text": "The first step in deploying an application with Layer0 is to create an environment.\nAn environment is a dedicated space in which one or more services can reside.\nHere, we'll create a new environment named demo-env .\nAt the command prompt, execute the following: l0 environment create demo-env We should see output like the following: ENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium We can inspect our environments in a couple of different ways: l0 environment list will give us a brief summary of all environments: ENVIRONMENT ID ENVIRONMENT NAME\ndemo00e6aa9 demo-env\napi api l0 environment get demo-env will show us more information about the demo-env environment we just created: ENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium l0 environment get \\* illustrates wildcard matching (you could also have used demo* in the above command), and it will return detailed information for each environment, not just one - it's like a detailed list : ENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\napi api 2 m3.medium", - "title": "Part 1: Create the Environment" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-2-create-the-load-balancer", - "text": "In order to expose a web application to the public internet, we need to create a load balancer.\nA load balancer listens for web traffic at a specific address and directs that traffic to a Layer0 service. A load balancer also has a notion of a health check - a way to assess whether or not the service is healthy and running properly.\nBy default, Layer0 configures the health check of a load balancer based upon a simple TCP ping to port 80 every thirty seconds.\nAlso by default, this ping will timeout after five seconds of no response from the service, and two consecutive successes or failures are required for the service to be considered healthy or unhealthy. 
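These defaults can be overridden when a load balancer is created; for example, Deployment 2 of this walkthrough uses the --healthcheck-target flag to point the health check at a non-HTTP port when it creates a private Redis load balancer:
l0 loadbalancer create --port 6379:6379/tcp --private --healthcheck-target tcp:6379 demo-env redis-lb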
Here, we'll create a new load balancer named guestbook-lb inside of our environment named demo-env .\nThe load balancer will listen on port 80, and forward that traffic along to port 80 in the Docker container using the HTTP protocol.\nSince the port configuration is already aligned with the default health check, we don't need to specify any health check configuration when we create this load balancer.\nAt the command prompt, execute the following: l0 loadbalancer create --port 80:80/http demo-env guestbook-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env 80:80/HTTP true The following is a summary of the arguments passed in the above command: loadbalancer create : creates a new load balancer --port 80:80/HTTP : instructs the load balancer to forward requests from port 80 on the load balancer to port 80 in the EC2 instance using the HTTP protocol demo-env : the name of the environment in which you are creating the load balancer guestbook-lb : a name for the load balancer itself You can inspect load balancers in the same way that you inspected environments in Part 1.\nTry running the following commands to get an idea of the information available to you: l0 loadbalancer list l0 loadbalancer get guestbook-lb l0 loadbalancer get gues* l0 loadbalancer get \\* Note Notice that the load balancer list and get outputs list an ENVIRONMENT field - if you ever have load balancers (or other Layer0 entities) with the same name but in different environments, you can target a specific load balancer by qualifying it with its environment name: `l0 loadbalancer get demo-env:guestbook-lb`", - "title": "Part 2: Create the Load Balancer" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-3-deploy-the-ecs-task-definition", - "text": "The deploy command is used to specify the ECS task definition that outlines a web application.\nA deploy, once created, can be applied to multiple services - even across different environments! Here, we'll create a new deploy called guestbook-dpl that refers to the Guestbook.Dockerrun.aws.json file found in the guides repository.\nAt the command prompt, execute the following: l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.1 guestbook-dpl 1 The following is a summary of the arguments passed in the above command: deploy create : creates a new deployment and allows you to specify an ECS task definition Guestbook.Dockerrun.aws.json : the file name of the ECS task definition (use the full path of the file if it is not in your current working directory) guestbook-dpl : a name for the deploy, which you will use later when you create the service Deploy Versioning The DEPLOY NAME and VERSION are combined to create a unique identifier for a deploy.\nIf you create additional deploys named guestbook-dpl , they will be assigned different version numbers. You can always specify the latest version when targeting a deploy by using deploy name :latest -- for example, guestbook-dpl:latest .
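Versioning shows up later in this walkthrough: Deployment 2 re-runs the same create command with an updated task definition and receives version 2 of the deploy (output shown as a sketch; your IDs will differ):
l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl
DEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.2 guestbook-dpl 2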
Deploys support the same methods of inspection as environments and load balancers: l0 deploy list l0 deploy get guestbook* l0 deploy get guestbook:1 l0 deploy get guestbook:latest l0 deploy get \\*", - "title": "Part 3: Deploy the ECS Task Definition" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-4-create-the-service", - "text": "The final stage of the deployment process involves using the service command to create a new service and associate it with the environment, load balancer, and deploy that we created in the previous sections.\nThe service will execute the Docker containers which have been described in the deploy. Here, we'll create a new service called guestbook-svc . At the command prompt, execute the following: l0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest We should see output like the following: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 The following is a summary of the arguments passed in the above command: service create : creates a new service --loadbalancer demo-env:guestbook-lb : the fully-qualified name of the load balancer; in this case, the load balancer named guestbook-lb in the environment named demo-env . (It is not strictly necessary to use the fully qualified name of the load balancer, unless another load balancer with exactly the same name exists in a different environment.) demo-env : the name of the environment you created in Part 1 guestbook-svc : a name for the service you are creating guestbook-dpl : the name of the deploy that you created in Part 3 Layer0 services can be queried using the same get and list commands that we've come to expect by now.", - "title": "Part 4: Create the Service" - }, - { - "location": "/guides/walkthrough/deployment-1/#check-the-status-of-the-service", - "text": "After a service has been created, it may take several minutes for that service to completely finish deploying.\nA service's status may be checked by using the service get command. Let's take a peek at our guestbook-svc service.\nAt the command prompt, execute the following: l0 service get demo-env:guestbook-svc If we're quick enough, we'll be able to see the first stage of the process (this is what was output after running the service create command up in Part 4).\nWe should see an asterisk (*) next to the name of the guestbook-dpl:1 deploy, which indicates that the service is in a transitional state: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 In the next phase of deployment, if we execute the service get command again, we will see (1) in the Scale column; this indicates that 1 copy of the service is transitioning to an active state: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 (1) In the final phase of deployment, we will see 1/1 in the Scale column; this indicates that the service is running 1 copy: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1 1/1", - "title": "Check the Status of the Service" - }, - { - "location": "/guides/walkthrough/deployment-1/#get-the-applications-url", - "text": "Once the service has been completely deployed, we can obtain the URL for the application and launch it in a browser. 
At the command prompt, execute the following: l0 loadbalancer get demo-env:guestbook-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true url Copy the value shown in the URL column and paste it into a web browser.\nThe guestbook application will appear (once the service has completely finished deploying).", - "title": "Get the Application's URL" - }, - { - "location": "/guides/walkthrough/deployment-1/#logs", - "text": "Output from a Service's docker containers may be acquired by running the following command: l0 service logs SERVICE", - "title": "Logs" - }, - { - "location": "/guides/walkthrough/deployment-1/#cleanup", - "text": "If you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application. l0 environment delete demo-env However, if you intend to continue through Deployment 2 , you will want to keep the resources you made in this section.", - "title": "Cleanup" - }, - { - "location": "/guides/walkthrough/deployment-1/#deploy-with-terraform", - "text": "Instead of using the Layer0 CLI directly, you can use our Terraform provider, and deploy using Terraform ( learn more ) .\nYou can use Terraform with Layer0 and AWS to create \"fire-and-forget\" deployments for your applications. If you're following along, you'll want to be working in the walkthrough/deployment-1/ directory of your clone of the guides repo. We use these files to set up a Layer0 environment with Terraform: Filename Purpose main.tf Provisions resources; populates resources in template files outputs.tf Values that Terraform will yield during deployment terraform.tfstate Tracks status of deployment (created and managed by Terraform) terraform.tfvars Variables specific to the environment and application(s) variables.tf Values that Terraform will use during deployment", - "title": "Deploy with Terraform" - }, - { - "location": "/guides/walkthrough/deployment-1/#tf-a-brief-aside", - "text": "Let's take a moment to discuss the .tf files.\nThe names of these files (and even the fact that they are separated out into multiple files at all) are completely arbitrary and exist solely for human-readability.\nTerraform understands all .tf files in a directory all together. In variables.tf , you'll see \"endpoint\" and \"token\" variables. In outputs.tf , you'll see that Terraform should spit out the url of the guestbook's load balancer once deployment has finished. In main.tf , you'll see the bulk of the deployment process.\nIf you've followed along with the Layer0 CLI deployment above, it should be fairly easy to see how blocks in this file map to steps in the CLI process.\nWhen we began the CLI deployment, our first step was to create an environment: l0 environment create demo-env This command is recreated in main.tf like so: # walkthrough/deployment-1/main.tf\n\nresource layer0_environment demo-env {\n name = demo-env \n} We've bundled up the heart of the Guestbook deployment (load balancer, deploy, service, etc.) into a Terraform module .\nTo use it, we declare a module block and pass in the source of the module as well as any configuration or variables that the module needs.
# walkthrough/deployment-1/main.tf\n\nmodule guestbook {\n source = github.com/quintilesims/guides//guestbook/module \n environment_id = ${layer0_environment.demo.id} \n} You can see that we pass in the ID of the environment we create.\nAll variables declared in this block are passed to the module, so the next file we should look at is variables.tf inside of the module to get an idea of what the module is expecting. There are a lot of variables here, but only one of them doesn't have a default value. # guestbook/module/variables.tf\n\nvariable environment_id {\n description = id of the layer0 environment in which to create resources \n} You'll notice that this is the variable that we're passing in.\nFor this particular deployment of the Guestbook, all of the default options are fine.\nWe could override any of them if we wanted to, just by specifying a new value for them back in deployment-1/main.tf . Now that we've seen the variables that the module will have, let's take a look at part of module/main.tf and see how some of them might be used: # guestbook/module/main.tf\n\nresource layer0_load_balancer guestbook-lb {\n name = ${var.load_balancer_name} \n environment = ${var.environment_id} \n port {\n host_port = 80\n container_port = 80\n protocol = http \n }\n}\n\n... You can follow this link to learn more about Layer0 resources in Terraform.", - "title": "*.tf: A Brief Aside" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-1-terraform-get", - "text": "This deployment uses modules, so we'll need to fetch those source materials.\nAt the command prompt, execute the following command: terraform get We should see output like the following: Get: git::https://github.com/quintilesims/guides.git We should now have a new local directory called .terraform/ .\nWe don't need to do anything with it; we just want to make sure it's there.", - "title": "Part 1: Terraform Get" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-2-terraform-plan", - "text": "Before we actually create/update/delete any resources, it's a good idea to find out what Terraform intends to do. Run terraform plan . Terraform will prompt you for configuration values that it does not have: var.endpoint\n Enter a value:\n\nvar.token\n Enter a value: You can find these values by running l0-setup endpoint your layer0 prefix . Note There are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the terraform.tfvars file, or exporting environment variables like TF_VAR_endpoint and TF_VAR_token , for example). See the Terraform Docs for more. The plan command should give us output like the following: Refreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.guestbook: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed.
Cyan entries are data sources to be read.\n\nNote: You didn't specify an -out parameter to save this plan, so when apply is called, Terraform can't guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: computed \n cluster_count: computed \n links: computed \n name: demo \n os: linux \n security_group_id: computed \n size: m3.medium \n\n+ module.guestbook.layer0_deploy.guestbook\n content: {\\n \\ AWSEBDockerrunVersion\\ : 2,\\n \\ containerDefinitions\\ : [\\n {\\n \\ name\\ : \\ guestbook\\ ,\\n \\ image\\ : \\ quintilesims/guestbook\\ ,\\n \\ essential\\ : true,\\n \\ memory\\ : 128,\\n \\ environment\\ : [\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_TYPE\\ ,\\n \\ value\\ : \\ memory\\ \\n },\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_CONFIG\\ ,\\n \\ value\\ : \\ \\ \\n },\\n {\\n \\ name\\ : \\ AWS_ACCESS_KEY_ID\\ ,\\n \\ value\\ : \\ \\ \\n },\\n {\\n \\ name\\ : \\ AWS_SECRET_ACCESS_KEY\\ ,\\n \\ value\\ : \\ \\ \\n },\\n {\\n \\ name\\ : \\ AWS_REGION\\ ,\\n \\ value\\ : \\ us-west-2\\ \\n }\\n ],\\n \\ portMappings\\ : [\\n {\\n \\ hostPort\\ : 80,\\n \\ containerPort\\ : 80\\n }\\n ]\\n }\\n ]\\n}\\n \n name: guestbook \n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: ${var.environment_id} \n health_check.#: computed \n name: guestbook \n port.#: 1 \n port.2027667003.certificate: \n port.2027667003.container_port: 80 \n port.2027667003.host_port: 80 \n port.2027667003.protocol: http \n url: computed \n\n+ module.guestbook.layer0_service.guestbook\n deploy: ${ var.deploy_id == \\ \\ ? layer0_deploy.guestbook.id : var.deploy_id } \n environment: ${var.environment_id} \n load_balancer: ${layer0_load_balancer.guestbook.id} \n name: guestbook \n scale: 1 \n wait: true \n\n\nPlan: 4 to add, 0 to change, 0 to destroy. This shows you that Terraform intends to create a deploy, an environment, a load balancer, and a service, all through Layer0. If you've gone through this deployment using the Layer0 CLI , you may notice that these resources appear out of order - that's fine. Terraform presents these resources in alphabetical order, but underneath, it knows the correct order in which to create them. Once we're satisfied that Terraform will do what we want it to do, we can move on to actually making these things exist!", - "title": "Part 2: Terraform Plan" - }, - { - "location": "/guides/walkthrough/deployment-1/#part-3-terraform-apply", - "text": "Run terraform apply to begin the process. We should see output like the following: layer0_environment.demo: Refreshing state...\n...\n...\n...\nlayer0_service.guestbook: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = http endpoint for the sample application Note It may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.", - "title": "Part 3: Terraform Apply" - }, - { - "location": "/guides/walkthrough/deployment-1/#whats-happening", - "text": "Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment. 
Terraform also writes the state of your deployment to the terraform.tfstate file (creating a new one if it's not already there).", - "title": "What's Happening" - }, - { - "location": "/guides/walkthrough/deployment-1/#cleanup_1", - "text": "When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application. Execute the following command (in the same directory): terraform destroy It's also now safe to remove the .terraform/ directory and the *.tfstate* files.", - "title": "Cleanup" - }, - { - "location": "/guides/walkthrough/deployment-2/", - "text": "Deployment 2: Guestbook + Redis\n#\n\n\nIn this section, we're going to add some complexity to the previous deployment.\n\nDeployment 1\n saw us create a simple guestbook application which kept its data in memory.\nBut what if that ever came down, either by intention or accident?\nIt would be easy enough to redeploy it, but all of the entered data would be lost.\nWhat if we wanted to scale the application to run more than one copy?\nFor this deployment, we're going to separate the data store from the guestbook application by creating a second Layer0 service which will house a Redis database server and linking it to the first.\nYou can choose to complete this section using either \nthe Layer0 CLI\n or \nTerraform\n.\n\n\n\n\nDeploy with Layer0 CLI\n#\n\n\nFor this example, we'll be working in the \nwalkthrough/deployment-2/\n directory of the \nguides\n repo.\nWe assume that you've completed the \nLayer0 CLI\n section of Deployment 1.\n\n\nFiles used in this deployment:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nGuestbook.Dockerrun.aws.json\n\n\nTemplate for running the Guestbook application\n\n\n\n\n\n\nRedis.Dockerrun.aws.json\n\n\nTemplate for running a Redis server\n\n\n\n\n\n\n\n\n\n\nPart 1: Create the Redis Load Balancer\n#\n\n\nBoth the Guestbook service and the Redis service will live in the same Layer0 environment, so we don't need to create one like we did in the first deployment.\nWe'll start by making a load balancer behind which the Redis service will be deployed.\n\n\nThe \nRedis.Dockerrun.aws.json\n task definition file we'll use is very simple - it just spins up a Redis server with the default configuration, which means that it will be serving on port 6379.\nOur load balancer needs to be able to forward TCP traffic to and from this port.\nAnd since we don't want the Redis server to be exposed to the public internet, we'll put it behind a private load balancer; private load balancers only accept traffic that originates from within their own environment.\nWe'll also need to specify a non-default healthcheck target, since the load balancer won't expose port 80.\nAt the command prompt, execute the following:\n\n\nl0 loadbalancer create --port 6379:6379/tcp --private --healthcheck-target tcp:6379 demo-env redis-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env 6378:6379:TCP false\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nloadbalancer create\n: creates a new load balancer\n\n\n--port 6379:6379/TCP\n: instructs the load balancer to forward requests from port 6379 on the load balancer to port 6379 in the EC2 instance using the TCP protocol\n\n\n--private\n: instructs the load balancer to ignore external traffic\n\n\n--healthcheck-target tcp:6379\n: instructs the load balancer to check the health of the 
service via TCP pings to port 6379\n\n\ndemo-env\n: the name of the environment in which the load balancer is being created\n\n\nredis-lb\n: a name for the load balancer itself\n\n\n\n\n\n\nPart 2: Deploy the ECS Task Definition\n#\n\n\nHere, we just need to create the deploy using the \nRedis.Dockerrun.aws.json\n task definition file.\nAt the command prompt, execute the following:\n\n\nl0 deploy create Redis.Dockerrun.aws.json redis-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nredis-dpl.1 redis-dpl 1\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\ndeploy create\n: creates a new Layer0 Deploy and allows you to specify an ECS task definition\n\n\nRedis.Dockerrun.aws.json\n: the file name of the ECS task definition (use the full path of the file if it is not in your current working directory)\n\n\nredis-dpl\n: a name for the deploy, which we will use later when we create the service\n\n\n\n\n\n\nPart 3: Create the Redis Service\n#\n\n\nHere, we just need to pull the previous resources together into a service.\nAt the command prompt, execute the following:\n\n\nl0 service create --wait --loadbalancer demo-env:redis-lb demo-env redis-svc redis-dpl:latest\n\n\nWe should see output like the following:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nredislb16ae6 redis-svc demo-env redis-lb redis-dpl:1 0/1\n\n\n\n\nThe following is a summary of the arguments passed in the above commands:\n\n\n\n\nservice create\n: creates a new Layer0 Service\n\n\n--wait\n: instructs the CLI to keep hold of the shell until the service has been successfully deployed\n\n\n--loadbalancer demo-env:redis-lb\n: the fully-qualified name of the load balancer; in this case, the load balancer named \nredis-lb\n in the environment named \ndemo-env\n\n\n(Again, it's not strictly necessary to use the fully-qualified name of the load balancer as long as there isn't another load balancer with the same name in a different environment)\n\n\n\n\n\n\ndemo-env\n: the name of the environment in which the service is to reside\n\n\nredis-svc\n: a name for the service we're creating\n\n\nredis-dpl:latest\n: the name of the deploy the service will put into action\n\n\n(We use \n:\n to specify which deploy we want - \n:latest\n will always give us the most recently-created one.)\n\n\n\n\n\n\n\n\n\n\nPart 4: Check the Status of the Redis Service\n#\n\n\nAs in the first deployment, we can keep an eye on our service by using the \nservice get\n command:\n\n\nl0 service get redis-svc\n\n\nOnce the service has finished scaling, try looking at the service's logs to see the output that the Redis server creates:\n\n\nl0 service logs redis-svc\n\n\nAmong some warnings and information not important to this exercise and a fun bit of ASCII art, you should see something like the following:\n\n\n... 
# words and ASCII art\n1:M 05 Apr 23:29:47.333 * The server is now ready to accept connections on port 6379\n\n\n\n\nNow we just need to teach the Guestbook application how to talk with our Redis service.\n\n\n\n\nPart 5: Update the Guestbook Deploy\n#\n\n\nYou should see in \nwalkthrough/deployment-2/\n another \nGuestbook.Dockerrun.aws.json\n file.\nThis file is very similar to but not the same as the one in \ndeployment-1/\n - if you open it up, you can see the following additions:\n\n\n ...\n \nenvironment\n: [\n {\n \nname\n: \nGUESTBOOK_BACKEND_TYPE\n,\n \nvalue\n: \nredis\n\n },\n {\n \nname\n: \nGUESTBOOK_BACKEND_CONFIG\n,\n \nvalue\n: \nredis host and port here\n\n }\n ],\n ...\n\n\n\n\nThe \n\"GUESTBOOK_BACKEND_CONFIG\"\n variable is what will point the Guestbook application towards the Redis server.\nThe \nredis host and port here\n section needs to be replaced and populated in the following format:\n\n\nvalue\n: \nADDRESS_OF_REDIS_SERVER:PORT_THE_SERVER_IS_SERVING_ON\n\n\n\n\nWe already know that Redis is serving on port 6379, so let's go find the server's address.\nRemember, it lives behind a load balancer that we made, so run the following command:\n\n\nl0 loadbalancer get redis-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env redis-svc 6379:6379/TCP false internal-l0-\nyadda-yadda\n.elb.amazonaws.com\n\n\n\n\nCopy that \nURL\n value, replace \nredis host and port here\n with the \nURL\n value in \nGuestbook.Dockerrun.aws.json\n, append \n:6379\n to it, and save the file.\nIt should look something like the following:\n\n\n ...\n \nenvironment\n: [\n {\n \nname\n: \nGUESTBOOK_BACKEND_CONFIG\n,\n \nvalue\n: \ninternal-l0-\nyadda-yadda\n.elb.amazonaws.com:6379\n\n }\n ],\n ...\n\n\n\n\nNow, we can create an updated deploy:\n\n\nl0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.2 guestbook-dpl 2\n\n\n\n\n\n\nPart 6: Update the Guestbook Service\n#\n\n\nAlmost all the pieces are in place!\nNow we just need to apply the new Guestbook deploy to the running Guestbook service:\n\n\nl0 service update guestbook-svc guestbook-dpl:latest\n\n\nAs the Guestbook service moves through the phases of its update process, we should see outputs like the following (if we keep an eye on the service with \nl0 service get guestbook-svc\n, that is):\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2* 1/1\n guestbook-dpl:1\n\n\n\n\nabove: \nguestbook-dpl:2\n is in a transitional state\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 2/1\n guestbook-dpl:1\n\n\n\n\nabove: both versions of the deployment are running at scale\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1\n guestbook-dpl:1*\n\n\n\n\nabove: \nguestbook-dpl:1\n is in a transitional state\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1\n\n\n\n\nabove: \nguestbook-dpl:1\n has been removed, and only \nguestbook-dpl:2\n remains\n\n\n\n\nPart 7: Prove It\n#\n\n\nYou should now be able to point your browser at the URL for the Guestbook load balancer (run \nl0 loadbalancer get 
guestbook-lb\n to find it) and see what looks like the same Guestbook application you deployed in the first section of the walkthrough.\nGo ahead and add a few entries, make sure it's functioning properly.\nWe'll wait.\n\n\nNow, let's prove that we've actually separated the data from the application by deleting and redeploying the Guestbook application:\n\n\nl0 service delete --wait guestbook-svc\n\n\n(We'll leave the \ndeploy\n intact so we can spin up a new service easily, and we'll leave the environment untouched because it also contained the Redis server.\nWe'll also pass the \n--wait\n flag so that we don't need to keep checking on the status of the job to know when it's complete.)\n\n\nOnce those resources have been deleted, we can recreate them!\n\n\nCreate another service, using the \nguestbook-dpl\n deploy we kept around:\n\n\nl0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest\n\n\nWait for everything to spin up, and hit that new load balancer's url (\nl0 loadbalancer get guestbook-lb\n) with your browser.\nYour data should still be there!\n\n\n\n\nCleanup\n#\n\n\nIf you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application.\n\n\nl0 environment delete demo-env\n\n\nHowever, if you intend to continue through \nDeployment 3\n, you will want to keep the resources you made in this section.\n\n\n\n\nDeploy with Terraform\n#\n\n\nAs before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI. As before, we will assume that you've cloned the \nguides\n repo and are working in the \nwalkthrough/deployment-2/\n directory.\n\n\nWe'll use these files to manage our deployment with Terraform:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nmain.tf\n\n\nProvisions resources; populates variables in template files\n\n\n\n\n\n\noutputs.tf\n\n\nValues that Terraform will yield during deployment\n\n\n\n\n\n\nterraform.tfstate\n\n\nTracks status of deployment \n(created and managed by Terraform)\n\n\n\n\n\n\nterraform.tfvars\n\n\nVariables specific to the environment and application(s)\n\n\n\n\n\n\nvariables.tf\n\n\nValues that Terraform will use during deployment\n\n\n\n\n\n\n\n\n\n\n*.tf\n: A Brief Aside: Revisited\n#\n\n\nNot much is changed from \nDeployment 1\n.\nIn \nmain.tf\n, we pull in a new, second module that will deploy Redis for us.\nWe maintain this module as well; you can inspect \nthe repo\n if you'd like.\n\n\nIn \nmain.tf\n where we pull in the Guestbook module, you'll see that we're supplying more values than we did last time, because we need some additional configuration to let the Guestbook application use a Redis backend instead of its default in-memory storage.\n\n\n\n\nPart 1: Terraform Get\n#\n\n\nRun \nterraform get\n to pull down the source materials Terraform will use for deployment.\nThis will create a local \n.terraform/\n directory.\n\n\n\n\nPart 2: Terraform Plan\n#\n\n\nIt's always a good idea to find out what Terraform intends to do, so let's do that:\n\n\nterraform plan\n\n\nAs before, we'll be prompted for any variables Terraform needs and doesn't have (see the note in \nDeployment 1\n for configuring Terraform variables).\nWe'll see output similar to the following:\n\n\nRefreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state 
storage.\n\ndata.template_file.redis: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. Cyan entries are data sources to be read.\n\nNote: You didn't specify an \n-out\n parameter to save this plan, so when\n\napply\n is called, Terraform can't guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: \ncomputed\n\n cluster_count: \ncomputed\n\n links: \ncomputed\n\n name: \ndemo\n\n os: \nlinux\n\n security_group_id: \ncomputed\n\n size: \nm3.medium\n\n\n+ module.redis.layer0_deploy.redis\n content: \n{\\n \\\nAWSEBDockerrunVersion\\\n: 2,\\n \\\ncontainerDefinitions\\\n: [\\n {\\n \\\nname\\\n: \\\nredis\\\n,\\n \\\nimage\\\n: \\\nredis:3.2-alpine\\\n,\\n \\\nessential\\\n: true,\\n \\\nmemory\\\n: 128,\\n \\\nportMappings\\\n: [\\n {\\n \\\nhostPort\\\n: 6379,\\n \\\ncontainerPort\\\n: 6379\\n }\\n ]\\n }\\n ]\\n}\\n\\n\n\n name: \nredis\n\n\n+ module.redis.layer0_load_balancer.redis\n environment: \n${var.environment_id}\n\n health_check.#: \ncomputed\n\n name: \nredis\n\n port.#: \n1\n\n port.1072619732.certificate: \n\n port.1072619732.container_port: \n6379\n\n port.1072619732.host_port: \n6379\n\n port.1072619732.protocol: \ntcp\n\n private: \ntrue\n\n url: \ncomputed\n\n\n+ module.redis.layer0_service.redis\n deploy: \n${ var.deploy_id == \\\n\\\n ? layer0_deploy.redis.id : var.deploy_id }\n\n environment: \n${var.environment_id}\n\n load_balancer: \n${layer0_load_balancer.redis.id}\n\n name: \nredis\n\n scale: \n1\n\n wait: \ntrue\n\n\n\n= module.guestbook.data.template_file.guestbook\n rendered: \ncomputed\n\n template: \n{\\n \\\nAWSEBDockerrunVersion\\\n: 2,\\n \\\ncontainerDefinitions\\\n: [\\n {\\n \\\nname\\\n: \\\nguestbook\\\n,\\n \\\nimage\\\n: \\\nquintilesims/guestbook\\\n,\\n \\\nessential\\\n: true,\\n \\\nmemory\\\n: 128,\\n \\\nenvironment\\\n: [\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_TYPE\\\n,\\n \\\nvalue\\\n: \\\n${backend_type}\\\n\\n },\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_CONFIG\\\n,\\n \\\nvalue\\\n: \\\n${backend_config}\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_ACCESS_KEY_ID\\\n,\\n \\\nvalue\\\n: \\\n${access_key}\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_SECRET_ACCESS_KEY\\\n,\\n \\\nvalue\\\n: \\\n${secret_key}\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_REGION\\\n,\\n \\\nvalue\\\n: \\\n${region}\\\n\\n }\\n ],\\n \\\nportMappings\\\n: [\\n {\\n \\\nhostPort\\\n: 80,\\n \\\ncontainerPort\\\n: 80\\n }\\n ]\\n }\\n ]\\n}\\n\n\n vars.%: \ncomputed\n\n\n+ module.guestbook.layer0_deploy.guestbook\n content: \n${data.template_file.guestbook.rendered}\n\n name: \nguestbook\n\n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: \n${var.environment_id}\n\n health_check.#: \ncomputed\n\n name: \nguestbook\n\n port.#: \n1\n\n port.2027667003.certificate: \n\n port.2027667003.container_port: \n80\n\n port.2027667003.host_port: \n80\n\n port.2027667003.protocol: \nhttp\n\n url: \ncomputed\n\n\n+ module.guestbook.layer0_service.guestbook\n deploy: \n${ var.deploy_id == \\\n\\\n ? 
layer0_deploy.guestbook.id : var.deploy_id }\n\n environment: \n${var.environment_id}\n\n load_balancer: \n${layer0_load_balancer.guestbook.id}\n\n name: \nguestbook\n\n scale: \n2\n\n wait: \ntrue\n\n\n\nPlan: 7 to add, 0 to change, 0 to destroy.\n\n\n\n\nWe should see that Terraform intends to add 7 new resources, some of which are for the Guestbook deployment and some of which are for the Redis deployment.\n\n\n\n\nPart 2: Terraform Apply\n#\n\n\nRun \nterraform apply\n, and we should see output similar to the following:\n\n\ndata.template_file.redis: Refreshing state...\nlayer0_deploy.redis-dpl: Creating...\n\n...\n...\n...\n\nlayer0_service.guestbook-svc: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = \nhttp endpoint for the sample application\n\n\n\n\n\n\nNote\n\n\nIt may take a few minutes for the guestbook service to launch and the load balancer to become available.\nDuring that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.\n\n\n\n\nWhat's Happening\n#\n\n\nTerraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment.\nTerraform also writes the state of your deployment to the \nterraform.tfstate\n file (creating a new one if it's not already there).\n\n\nCleanup\n#\n\n\nWhen you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application.\nExecute the following command (in the same directory):\n\n\nterraform destroy\n\n\nIt's also now safe to remove the \n.terraform/\n directory and the \n*.tfstate*\n files.", - "title": "Walkthrough: Deployment 2" - }, - { - "location": "/guides/walkthrough/deployment-2/#deployment-2-guestbook-redis", - "text": "In this section, we're going to add some complexity to the previous deployment. Deployment 1 saw us create a simple guestbook application which kept its data in memory.\nBut what if that ever came down, either by intention or accident?\nIt would be easy enough to redeploy it, but all of the entered data would be lost.\nWhat if we wanted to scale the application to run more than one copy?\nFor this deployment, we're going to separate the data store from the guestbook application by creating a second Layer0 service which will house a Redis database server and linking it to the first.\nYou can choose to complete this section using either the Layer0 CLI or Terraform .", - "title": "Deployment 2: Guestbook + Redis" - }, - { - "location": "/guides/walkthrough/deployment-2/#deploy-with-layer0-cli", - "text": "For this example, we'll be working in the walkthrough/deployment-2/ directory of the guides repo.\nWe assume that you've completed the Layer0 CLI section of Deployment 1. 
Files used in this deployment: Filename Purpose Guestbook.Dockerrun.aws.json Template for running the Guestbook application Redis.Dockerrun.aws.json Template for running a Redis server", - "title": "Deploy with Layer0 CLI" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-1-create-the-redis-load-balancer", - "text": "Both the Guestbook service and the Redis service will live in the same Layer0 environment, so we don't need to create one like we did in the first deployment.\nWe'll start by making a load balancer behind which the Redis service will be deployed. The Redis.Dockerrun.aws.json task definition file we'll use is very simple - it just spins up a Redis server with the default configuration, which means that it will be serving on port 6379.\nOur load balancer needs to be able to forward TCP traffic to and from this port.\nAnd since we don't want the Redis server to be exposed to the public internet, we'll put it behind a private load balancer; private load balancers only accept traffic that originates from within their own environment.\nWe'll also need to specify a non-default healthcheck target, since the load balancer won't expose port 80.\nAt the command prompt, execute the following: l0 loadbalancer create --port 6379:6379/tcp --private --healthcheck-target tcp:6379 demo-env redis-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env 6378:6379:TCP false The following is a summary of the arguments passed in the above command: loadbalancer create : creates a new load balancer --port 6379:6379/TCP : instructs the load balancer to forward requests from port 6379 on the load balancer to port 6379 in the EC2 instance using the TCP protocol --private : instructs the load balancer to ignore external traffic --healthcheck-target tcp:6379 : instructs the load balancer to check the health of the service via TCP pings to port 6379 demo-env : the name of the environment in which the load balancer is being created redis-lb : a name for the load balancer itself", - "title": "Part 1: Create the Redis Load Balancer" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-2-deploy-the-ecs-task-definition", - "text": "Here, we just need to create the deploy using the Redis.Dockerrun.aws.json task definition file.\nAt the command prompt, execute the following: l0 deploy create Redis.Dockerrun.aws.json redis-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nredis-dpl.1 redis-dpl 1 The following is a summary of the arguments passed in the above command: deploy create : creates a new Layer0 Deploy and allows you to specify an ECS task definition Redis.Dockerrun.aws.json : the file name of the ECS task definition (use the full path of the file if it is not in your current working directory) redis-dpl : a name for the deploy, which we will use later when we create the service", - "title": "Part 2: Deploy the ECS Task Definition" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-3-create-the-redis-service", - "text": "Here, we just need to pull the previous resources together into a service.\nAt the command prompt, execute the following: l0 service create --wait --loadbalancer demo-env:redis-lb demo-env redis-svc redis-dpl:latest We should see output like the following: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nredislb16ae6 redis-svc demo-env redis-lb redis-dpl:1 0/1 The following is a summary of the arguments passed in the above 
commands: service create : creates a new Layer0 Service --wait : instructs the CLI to keep hold of the shell until the service has been successfully deployed --loadbalancer demo-env:redis-lb : the fully-qualified name of the load balancer; in this case, the load balancer named redis-lb in the environment named demo-env (Again, it's not strictly necessary to use the fully-qualified name of the load balancer as long as there isn't another load balancer with the same name in a different environment) demo-env : the name of the environment in which the service is to reside redis-svc : a name for the service we're creating redis-dpl:latest : the name of the deploy the service will put into action (We use : to specify which deploy we want - :latest will always give us the most recently-created one.)", - "title": "Part 3: Create the Redis Service" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-4-check-the-status-of-the-redis-service", - "text": "As in the first deployment, we can keep an eye on our service by using the service get command: l0 service get redis-svc Once the service has finished scaling, try looking at the service's logs to see the output that the Redis server creates: l0 service logs redis-svc Among some warnings and information not important to this exercise and a fun bit of ASCII art, you should see something like the following: ... # words and ASCII art\n1:M 05 Apr 23:29:47.333 * The server is now ready to accept connections on port 6379 Now we just need to teach the Guestbook application how to talk with our Redis service.", - "title": "Part 4: Check the Status of the Redis Service" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-5-update-the-guestbook-deploy", - "text": "You should see in walkthrough/deployment-2/ another Guestbook.Dockerrun.aws.json file.\nThis file is very similar to but not the same as the one in deployment-1/ - if you open it up, you can see the following additions: ...\n environment : [\n {\n name : GUESTBOOK_BACKEND_TYPE ,\n value : redis \n },\n {\n name : GUESTBOOK_BACKEND_CONFIG ,\n value : redis host and port here \n }\n ],\n ... The \"GUESTBOOK_BACKEND_CONFIG\" variable is what will point the Guestbook application towards the Redis server.\nThe redis host and port here section needs to be replaced and populated in the following format: value : ADDRESS_OF_REDIS_SERVER:PORT_THE_SERVER_IS_SERVING_ON We already know that Redis is serving on port 6379, so let's go find the server's address.\nRemember, it lives behind a load balancer that we made, so run the following command: l0 loadbalancer get redis-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env redis-svc 6379:6379/TCP false internal-l0- yadda-yadda .elb.amazonaws.com Copy that URL value, replace redis host and port here with the URL value in Guestbook.Dockerrun.aws.json , append :6379 to it, and save the file.\nIt should look something like the following: ...\n environment : [\n {\n name : GUESTBOOK_BACKEND_CONFIG ,\n value : internal-l0- yadda-yadda .elb.amazonaws.com:6379 \n }\n ],\n ... 
Now, we can create an updated deploy: l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.2 guestbook-dpl 2", - "title": "Part 5: Update the Guestbook Deploy" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-6-update-the-guestbook-service", - "text": "Almost all the pieces are in place!\nNow we just need to apply the new Guestbook deploy to the running Guestbook service: l0 service update guestbook-svc guestbook-dpl:latest As the Guestbook service moves through the phases of its update process, we should see outputs like the following (if we keep an eye on the service with l0 service get guestbook-svc , that is): SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2* 1/1\n guestbook-dpl:1 above: guestbook-dpl:2 is in a transitional state SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 2/1\n guestbook-dpl:1 above: both versions of the deployment are running at scale SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1\n guestbook-dpl:1* above: guestbook-dpl:1 is in a transitional state SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1 above: guestbook-dpl:1 has been removed, and only guestbook-dpl:2 remains", - "title": "Part 6: Update the Guestbook Service" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-7-prove-it", - "text": "You should now be able to point your browser at the URL for the Guestbook load balancer (run l0 loadbalancer get guestbook-lb to find it) and see what looks like the same Guestbook application you deployed in the first section of the walkthrough.\nGo ahead and add a few entries, make sure it's functioning properly.\nWe'll wait. Now, let's prove that we've actually separated the data from the application by deleting and redeploying the Guestbook application: l0 service delete --wait guestbook-svc (We'll leave the deploy intact so we can spin up a new service easily, and we'll leave the environment untouched because it also contained the Redis server.\nWe'll also pass the --wait flag so that we don't need to keep checking on the status of the job to know when it's complete.) Once those resources have been deleted, we can recreate them! Create another service, using the guestbook-dpl deploy we kept around: l0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest Wait for everything to spin up, and hit that new load balancer's url ( l0 loadbalancer get guestbook-lb ) with your browser.\nYour data should still be there!", - "title": "Part 7: Prove It" - }, - { - "location": "/guides/walkthrough/deployment-2/#cleanup", - "text": "If you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application. l0 environment delete demo-env However, if you intend to continue through Deployment 3 , you will want to keep the resources you made in this section.", - "title": "Cleanup" - }, - { - "location": "/guides/walkthrough/deployment-2/#deploy-with-terraform", - "text": "As before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI. 
As before, we will assume that you've cloned the guides repo and are working in the walkthrough/deployment-2/ directory. We'll use these files to manage our deployment with Terraform: Filename Purpose main.tf Provisions resources; populates variables in template files outputs.tf Values that Terraform will yield during deployment terraform.tfstate Tracks status of deployment (created and managed by Terraform) terraform.tfvars Variables specific to the environment and application(s) variables.tf Values that Terraform will use during deployment", - "title": "Deploy with Terraform" - }, - { - "location": "/guides/walkthrough/deployment-2/#tf-a-brief-aside-revisited", - "text": "Not much is changed from Deployment 1 .\nIn main.tf , we pull in a new, second module that will deploy Redis for us.\nWe maintain this module as well; you can inspect the repo if you'd like. In main.tf where we pull in the Guestbook module, you'll see that we're supplying more values than we did last time, because we need some additional configuration to let the Guestbook application use a Redis backend instead of its default in-memory storage.", - "title": "*.tf: A Brief Aside: Revisited" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-1-terraform-get", - "text": "Run terraform get to pull down the source materials Terraform will use for deployment.\nThis will create a local .terraform/ directory.", - "title": "Part 1: Terraform Get" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-2-terraform-plan", - "text": "It's always a good idea to find out what Terraform intends to do, so let's do that: terraform plan As before, we'll be prompted for any variables Terraform needs and doesn't have (see the note in Deployment 1 for configuring Terraform variables).\nWe'll see output similar to the following: Refreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.redis: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. Cyan entries are data sources to be read.\n\nNote: You didn't specify an -out parameter to save this plan, so when apply is called, Terraform can't guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: computed \n cluster_count: computed \n links: computed \n name: demo \n os: linux \n security_group_id: computed \n size: m3.medium \n\n+ module.redis.layer0_deploy.redis\n content: {\\n \\ AWSEBDockerrunVersion\\ : 2,\\n \\ containerDefinitions\\ : [\\n {\\n \\ name\\ : \\ redis\\ ,\\n \\ image\\ : \\ redis:3.2-alpine\\ ,\\n \\ essential\\ : true,\\n \\ memory\\ : 128,\\n \\ portMappings\\ : [\\n {\\n \\ hostPort\\ : 6379,\\n \\ containerPort\\ : 6379\\n }\\n ]\\n }\\n ]\\n}\\n\\n \n name: redis \n\n+ module.redis.layer0_load_balancer.redis\n environment: ${var.environment_id} \n health_check.#: computed \n name: redis \n port.#: 1 \n port.1072619732.certificate: \n port.1072619732.container_port: 6379 \n port.1072619732.host_port: 6379 \n port.1072619732.protocol: tcp \n private: true \n url: computed \n\n+ module.redis.layer0_service.redis\n deploy: ${ var.deploy_id == \\ \\ ? 
layer0_deploy.redis.id : var.deploy_id } \n environment: ${var.environment_id} \n load_balancer: ${layer0_load_balancer.redis.id} \n name: redis \n scale: 1 \n wait: true = module.guestbook.data.template_file.guestbook\n rendered: computed \n template: {\\n \\ AWSEBDockerrunVersion\\ : 2,\\n \\ containerDefinitions\\ : [\\n {\\n \\ name\\ : \\ guestbook\\ ,\\n \\ image\\ : \\ quintilesims/guestbook\\ ,\\n \\ essential\\ : true,\\n \\ memory\\ : 128,\\n \\ environment\\ : [\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_TYPE\\ ,\\n \\ value\\ : \\ ${backend_type}\\ \\n },\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_CONFIG\\ ,\\n \\ value\\ : \\ ${backend_config}\\ \\n },\\n {\\n \\ name\\ : \\ AWS_ACCESS_KEY_ID\\ ,\\n \\ value\\ : \\ ${access_key}\\ \\n },\\n {\\n \\ name\\ : \\ AWS_SECRET_ACCESS_KEY\\ ,\\n \\ value\\ : \\ ${secret_key}\\ \\n },\\n {\\n \\ name\\ : \\ AWS_REGION\\ ,\\n \\ value\\ : \\ ${region}\\ \\n }\\n ],\\n \\ portMappings\\ : [\\n {\\n \\ hostPort\\ : 80,\\n \\ containerPort\\ : 80\\n }\\n ]\\n }\\n ]\\n}\\n \n vars.%: computed \n\n+ module.guestbook.layer0_deploy.guestbook\n content: ${data.template_file.guestbook.rendered} \n name: guestbook \n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: ${var.environment_id} \n health_check.#: computed \n name: guestbook \n port.#: 1 \n port.2027667003.certificate: \n port.2027667003.container_port: 80 \n port.2027667003.host_port: 80 \n port.2027667003.protocol: http \n url: computed \n\n+ module.guestbook.layer0_service.guestbook\n deploy: ${ var.deploy_id == \\ \\ ? layer0_deploy.guestbook.id : var.deploy_id } \n environment: ${var.environment_id} \n load_balancer: ${layer0_load_balancer.guestbook.id} \n name: guestbook \n scale: 2 \n wait: true \n\n\nPlan: 7 to add, 0 to change, 0 to destroy. We should see that Terraform intends to add 7 new resources, some of which are for the Guestbook deployment and some of which are for the Redis deployment.", - "title": "Part 2: Terraform Plan" - }, - { - "location": "/guides/walkthrough/deployment-2/#part-2-terraform-apply", - "text": "Run terraform apply , and we should see output similar to the following: data.template_file.redis: Refreshing state...\nlayer0_deploy.redis-dpl: Creating...\n\n...\n...\n...\n\nlayer0_service.guestbook-svc: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. 
To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = http endpoint for the sample application Note It may take a few minutes for the guestbook service to launch and the load balancer to become available.\nDuring that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.", - "title": "Part 2: Terraform Apply" - }, - { - "location": "/guides/walkthrough/deployment-2/#whats-happening", - "text": "Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment.\nTerraform also writes the state of your deployment to the terraform.tfstate file (creating a new one if it's not already there).", - "title": "What's Happening" - }, - { - "location": "/guides/walkthrough/deployment-2/#cleanup_1", - "text": "When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application.\nExecute the following command (in the same directory): terraform destroy It's also now safe to remove the .terraform/ directory and the *.tfstate* files.", - "title": "Cleanup" - }, - { - "location": "/guides/walkthrough/deployment-3/", - "text": "Deployment 3: Guestbook + Redis + Consul\n#\n\n\nIn \nDeployment 2\n, we created two services in the same environment and linked them together manually.\nWhile that can work for a small system, it's not really feasible for a system with a lot of moving parts - we would need to look up load balancer endpoints for all of our services and manually link them all together.\nTo that end, here we're going to redeploy our two-service system using \nConsul\n, a service discovery tool.\n\n\nFor this deployment, we'll create a cluster of Consul servers which will be responsible for keeping track of the state of our system.\nWe'll also deploy new versions of the Guestbook and Redis task definition files - in addition to creating a container for its respective application, each task definition creates two other containers:\n\n\n\n\na container for a Consul agent, which is in charge of communicating with the Consul server cluster\n\n\na container for \nRegistrator\n, which is in charge of talking to the local Consul agent when a service comes up or goes down.\n\n\n\n\nYou can choose to complete this section using either the \nLayer0 CLI\n or \nTerraform\n.\n\n\nDeploy with Layer0 CLI\n#\n\n\nIf you're following along, you'll want to be working in the \nwalkthrough/deployment-3/\n directory of your clone of the \nguides\n repo.\n\n\nFiles used in this deployment:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nCLI.Consul.Dockerrun.aws.json\n\n\nTemplate for running a Consul server\n\n\n\n\n\n\nCLI.Guestbook.Dockerrun.aws.json\n\n\nTemplate for running the Guestbook application with Registrator and Consul agent\n\n\n\n\n\n\nCLI.Redis.Dockerrun.aws.json\n\n\nTemplate for running a Redis server with Registrator and Consul agent\n\n\n\n\n\n\n\n\n\n\nPart 1: Create the Consul Load Balancer\n#\n\n\nThe Consul server cluster will live in the same environment as our Guestbook and Redis services - if you've completed \nDeployment 1\n and \nDeployment 2\n, this environment already exists as \ndemo-env\n.\nWe'll start by creating a load balancer for the Consul cluster.\nThe load balancer will be private since only Layer0 services need to communicate with the Consul cluster.\nAt the command prompt, execute the following:\n\n\nl0 
loadbalancer create --port 8500:8500/tcp --port 8301:8301/tcp --private --healthcheck-target tcp:8500 demo-env consul-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nconsull66b23 consul-lb demo-env 8500:8500/TCP false\n 8301:8301/TCP\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nloadbalancer create\n: creates a new load balancer\n\n\n--port 8500:8500/tcp\n: instructs the load balancer to forward requests from port 8500 on the load balancer to port 8500 in the EC2 instance using the TCP protocol\n\n\n--port 8301:8301/tcp\n: instructs the load balancer to forward requests from port 8301 on the load balancer to port 8301 in the EC2 instance using the TCP protocol\n\n\n--private\n: instructs the load balancer to ignore outside traffic\n\n\n--healthcheck-target\n: instructs the load balancer to use a TCP ping on port 8500 as the basis for deciding whether the service is healthy\n\n\ndemo-env\n: the name of the environment in which the load balancer is being created\n\n\nconsul-lb\n: a name for the load balancer itself\n\n\n\n\nWhile we're touching on the Consul load balancer, we should grab its URL - this is the one value that we'll need to know in order to deploy the rest of our system, no matter how large it may get.\nAt the command prompt, execute the following:\n\n\nl0 loadbalancer get consul-lb\n\n\nWe should see output that looks like the output we just received above after creating the load balancer, but this time there is something in the \nURL\n column.\nThat URL is the value we're looking for.\nMake note of it for when we reference it later.\n\n\n\n\nPart 2: Deploy the Consul Task Definition\n#\n\n\nBefore we can create the deploy, we need to supply the URL of the Consul load balancer that we got in Part 1.\nIn \nCLI.Consul.Dockerrun.aws.json\n, find the entry in the \nenvironment\n block that looks like this:\n\n\n{\n \nname\n: \nCONSUL_SERVER_URL\n,\n \nvalue\n: \n\n}\n\n\n\n\nUpdate the \"value\" with the Consul load balancer's URL and save the file.\nWe can then create the deploy.\nAt the command prompt, execute the following:\n\n\nl0 deploy create CLI.Consul.Dockerrun.aws.json consul-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nconsul-dpl.1 consul-dpl 1\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\ndeploy create\n: creates a new Layer0 Deploy and allows you to specify an ECS task definition\n\n\nCLI.Consul.Dockerrun.aws.json\n: the file name of the ECS task definition (use the full path of the file if it is not in the current working directory)\n\n\nconsul-dpl\n: a name for the deploy, which will later be used in creating the service\n\n\n\n\n\n\nPart 3: Create the Consul Service\n#\n\n\nHere, we pull the previous resources together to create a service.\nAt the command prompt, execute the following:\n\n\nl0 service create --wait --loadbalancer demo-env:consul-lb demo-env consul-svc consul-dpl:latest\n\n\nWe should see output like the following:\n\n\nWaiting for Deployment...\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nconsuls2f3c6 consul-svc demo-env consul-lb consul-dpl:1 1/1\n\n\n\n\nThe following is a summary of the arguments passed in the above commands:\n\n\n\n\nservice create\n: creates a new Layer0 Service\n\n\n--wait\n: instructs the CLI to keep hold of the shell until the service has been successfully deployed\n\n\n--loadbalancer 
demo-env:consul-lb\n: the fully-qualified name of the load balancer behind which the service should live; in this case, the load balancer named \nconsul-lb\n in the environment named \ndemo-env\n\n\ndemo-env\n: the name of the environment in which the service is to reside\n\n\nconsul-svc\n: a name for the service itself\n\n\nconsul-dpl:latest\n: the name and version of the deploy that the service should put into action\n\n\n\n\nOnce the service has finished being deployed (and \n--wait\n has returned our shell to us), we need to scale the service.\n\n\nCurrently, we only have one Consul server running in the cluster.\nFor best use, we should have at least 3 servers running (see \nthis link\n for more details on Consul servers and their consensus protocol).\nIndeed, if we inspect the \ncommand\n block of the task definition file, we can find the following parameter: \n-bootstrap-expect=3\n.\nThis tells the Consul server that we have just deployed that it should be expecting a total of three servers.\nWe still need to fulfill that expectation, so we'll scale our service up to three.\nAt the command prompt, execute the following:\n\n\nl0 service scale --wait consul-svc 3\n\n\nWe should see output like the following:\n\n\nWaiting for Deployment...\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nconsuls2f3c6 consul-svc demo-env consul-lb consul-dpl:1 3/3\n\n\n\n\n\n\nImportant!\n\n\nThe successful completion of the \nscale\n command doesn't mean that we're ready to move on just yet!\nWe need to check in on the logs (\nl0 service logs consul-svc\n) until we can confirm that all three of the Consul servers have synced up with each other.\nEach \nconsul-server\n section in the logs should be ending with \nconsul: Adding LAN server [ip address]\n or \nagent: Join completed\n.\nIf you see one of the sections ending with \nagent: Join failed, retrying in 30s\n, you need to wait for that server to join the cluster before continuing.\n\n\n\n\nPart 4: Update and Redeploy the Redis and Guestbook Applications\n#\n\n\nWe're going to need the URL of the Consul load balancer again.\nIn each of the CLI.Redis and CLI.Guestbook task definition files, look for the \nCONSUL_SERVER_URL\n block in the \nconsul-agent\n container and populate the value field with the Consul load balancer's URL, then save the file.\nAt the command prompt, execute the two following commands to create new versions of the deploys for the Redis and Guestbook applications:\n\n\nl0 deploy create CLI.Redis.Dockerrun.aws.json redis-dpl\n\n\nl0 deploy create CLI.Guestbook.Dockerrun.aws.json guestbook-dpl\n\n\nThen, execute the two following commands to redeploy the existing Redis and Guestbook services using those new deploys:\n\n\nl0 service update --wait redis-svc redis-dpl:latest\n\n\nl0 service update --wait guestbook-svc guestbook-dpl:latest\n\n\n\n\nNote\n\n\nHere, we should run \nl0 service logs consul-svc\n again and confirm that the Consul cluster has discovered these two services.\n\n\n\n\nWe can use \nl0 loadbalancer get guestbook-lb\n to obtain the guestbook application's URL, and then navigate to it with a web browser.\nOur guestbook app should be up and running - this time, it's been deployed without needing to know the address of the Redis backend!\n\n\nOf course, this is a simple example; in both this deployment and \nDeployment 2\n, we needed to use \nl0 loadbalancer get\n to obtain the URL of a load balancer.\nHowever, in a system with many services that uses Consul like this example, we only ever 
need to find the URL of the Consul cluster - not the URLs of every service that needs to talk to another of our services.\n\n\n\n\nPart 5: Inspect the Consul Universe (Optional)\n#\n\n\nLet's take a glimpse into how this system that we've deployed works.\n\nThis requires that we have access to the key pair we've told Layer0 about when we \nset it up\n.\n\n\nOpen Ports for SSH\n#\n\n\nWe want to SSH into the Guestbook EC2 instance, which means that we need to tell the Guestbook load balancer to allow SSH traffic through.\nAt the command prompt, execute the following:\n\n\nl0 loadbalancer addport guestbook-lb 22:22/tcp\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true \nurl\n\n 22:22/TCP\n\n\n\n\nWe need to take note of the load balancer's URL here, too.\n\n\nSSH Into the Instance\n#\n\n\nAt the command prompt, execute the following:\n\n\nssh -i /path/to/keypair ec2-user@\nguestbook_load_balancer_url\n -o ServerAliveInterval=30\n\n\n(We'll probably be asked if we want to continue connecting - we do, so we'll enter \nyes\n.)\n\n\nSummary of arguments passed into the above command:\n\n\n\n\n-i /path/to/keypair\n: this allows us to specify an identity file for use when connecting to the remote machine - in this case, we want to replace \n/path/to/keypair\n with the actual path to the keypair we created when we set up Layer0\n\n\nec2-user@\nguestbook_load_balancer_url\n: the address (here we want to replace \nguestbook_load_balancer_url\n with the actual URL of the guestbook load balancer) of the machine to which we want to connect and the name of the user (\nec2-user\n) that we'd like to connect as\n\n\n-o\n: allows us to set parameters on the \nssh\n command\n\n\nServerAliveInterval=30\n: one of those \nssh\n parameters - AWS imposes an automatic disconnect if a connection is not active for a certain amount of time, so we use this option to ping every 30 seconds to prevent that automatic disconnect\n\n\n\n\nLook Around You\n#\n\n\nWe're now inside of the EC2 instance!\nIf we run \ndocker ps\n, we should see that our three Docker containers (the Guestbook app, a Consul agent, and Registrator) are up and running, as well as an \namazon-ecs-agent\n image.\nBut that's not the Consul universe that we came here to see.\nAt the EC2 instance's command prompt, execute the following:\n\n\necho $(curl -s localhost:8500/v1/catalog/services) | jq '.'\n\n\nWe should see output like the following:\n\n\n{\n \nconsul\n: [],\n \nconsul-8301\n: [\n \nudp\n\n ],\n \nconsul-8500\n: [],\n \nconsul-8600\n: [\n \nudp\n\n ],\n \nguestbook-redis\n: [],\n \nredis\n: []\n}\n\n\n\n\nSummary of commands passed in the above command:\n\n\n\n\ncurl -s localhost:8500/v1/catalog/services\n: use \ncurl\n to send a GET request to the specified URL, where \nlocalhost:8500\n is an HTTP connection to the local Consul agent in this EC2 instance (the \n-s\n flag just silences excess output from \ncurl\n)\n\n\n| jq '.'\n: use a pipe (\n|\n) to take whatever returns from the left side of the pipe and pass it to the \njq\n program, which we use here simply to pretty-print the JSON response\n\n\necho $(...)\n: print out whatever returns from running the stuff inside of the parens; not necessary, but it gives us a nice newline after we get our response\n\n\n\n\nIn that output, we can see all of the things that our local Consul agent knows about.\nIn addition to a few connections to the Consul server 
cluster, we can see that it knows about the Guestbook application running in this EC2 instance, as well as the Redis application running in a different instance with its own Consul agent and Registrator.\n\n\nLet's take a closer look at the Redis service and see how our Guestbook application is locating our Redis application.\nAt the EC2 instance's command prompt, execute the following:\n\n\necho $(curl -s http://localhost:8500/v1/catalog/service/redis) | jq '.'\n\n\nWe should see output like the following:\n\n\n[\n {\n \nID\n: \nb4bb81e6-fe6a-c630-2553-7f6492ae5275\n,\n \nNode\n: \nip-10-100-230-97.us-west-2.compute.internal\n,\n \nAddress\n: \n10.100.230.97\n,\n \nDatacenter\n: \ndc1\n,\n \nTaggedAddresses\n: {\n \nlan\n: \n10.100.230.97\n,\n \nwan\n: \n10.100.230.97\n\n },\n \nNodeMeta\n: {},\n \nServiceID\n: \n562aceee6935:ecs-l0-tlakedev-redis-dpl-20-redis-e0f989e5af97cdfd0e00:6379\n,\n \nServiceName\n: \nredis\n,\n \nServiceTags\n: [],\n \nServiceAddress\n: \n10.100.230.97\n,\n \nServicePort\n: 6379,\n \nServiceEnableTagOverride\n: false,\n \nCreateIndex\n: 761,\n \nModifyIndex\n: 761\n }\n]\n\n\n\n\nTo \nreally\n see how the Guestbook application connects to Redis, we can take an \neven closer\n look!\n\n\nRun \ndocker ps\n to generate a listing of all the containers that Docker is running on the EC2 instance, and note the Container ID for the Guestbook container. Then run the following command to connect to the Guestbook container:\n\n\ndocker exec -it [container_id] /bin/sh\n\n\nOnce we've gotten inside the container, we'll run a similar command to the previous \ncurl\n:\n\n\ncurl -s consul-agent:8500/v1/catalog/service/redis\n\n\nOur Guestbook application makes a call like this one and figures out how to connect to the Redis service by mushing together the information from the \nServiceAddress\n and \nServicePort\n fields!\n\n\nTo close the \nssh\n connection to the EC2 instance, run \nexit\n in the command prompt.\n\n\n\n\nCleanup\n#\n\n\nWhen you're finished with the example, we can instruct Layer0 to terminate the applications and delete the environment.\n\n\nl0 environment delete demo-env\n\n\n\n\nDeploy with Terraform\n#\n\n\nAs before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI.\nAs before, we will assume that you've cloned the \nguides\n repo and are working in the \niterative-walkthrough/deployment-3/\n directory.\n\n\nWe'll use these files to manage our deployment with Terraform:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nGuestbook.Dockerrun.aws.json\n\n\nTemplate for running the Guestbook application\n\n\n\n\n\n\nmain.tf\n\n\nProvisions resources; populates variables in template files\n\n\n\n\n\n\noutputs.tf\n\n\nValues that Terraform will yield during deployment\n\n\n\n\n\n\nRedis.Dockerrun.aws.json\n\n\nTemplate for running the Redis application\n\n\n\n\n\n\nterraform.tfstate\n\n\nTracks status of deployment \n(created and managed by Terraform)\n\n\n\n\n\n\nterraform.tfvars\n\n\nVariables specific to the environment and application(s)\n\n\n\n\n\n\nvariables.tf\n\n\nValues that Terraform will use during deployment\n\n\n\n\n\n\n\n\n\n\n*.tf\n: A Brief Aside: Revisited: Redux\n#\n\n\nIn looking at \nmain.tf\n, you can see that we're pulling in a Consul module that we maintain (here's the \nrepo\n); this removes the need for a local task definition file.\n\n\nWe also are continuing to use modules for Redis and Guestbook.\nHowever, instead of just sourcing the module and passing in a value or two, you 
can see that we actually create new deploys from local task definition files and pass those deploys in to the module.\nThis design allows us to use pre-made modules while also offering a great deal of flexibility.\nIf you'd like to follow along the Redis deployment logic chain (the other applications/services work similarly), it goes something like this:\n\n\n\n\nmain.tf\n creates a deploy for the Redis server by rendering a local task definition and populating it with certain values\n\n\nmain.tf\n passes the ID of the deploy into the Redis module, along with other values the module requires\n\n\nthe Redis module\n pulls all the variables it knows about (both the defaults in \nvariables.tf\n as well as the ones passed in)\n\n\namong other Layer0/AWS resources, the module spins up a Redis service; since a deploy ID has been provided, it uses that deploy to create the service instead of a deploy made from a \ndefault task definition\n contained within the module\n\n\n\n\n\n\nPart 1: Terraform Get\n#\n\n\nRun \nterraform get\n to pull down all the source materials Terraform needs for our deployment.\n\n\n\n\nPart 2: Terraform Plan\n#\n\n\nAs before, we can run \nterraform plan\n to see what's going to happen.\nWe should see that there are 12 new resources to be created:\n\n\n\n\nthe environment\n\n\nthe two local deploys which will be used for Guestbook and Redis\n\n\nthe load balancer, deploy, and service from each of the Consul, Guestbook, and Redis modules\n\n\nnote that even though the default modules' deploys are created, they won't actually be used to deploy services\n\n\n\n\n\n\n\n\n\n\nPart 3: Terraform Apply\n#\n\n\nRun \nterraform apply\n, and we should see output similar to the following:\n\n\ndata.template_file.consul: Refreshing state...\nlayer0_deploy.consul-dpl: Creating...\n\n...\n...\n...\n\nlayer0_service.guestbook-svc: Creation complete\n\nApply complete! Resources: 10 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. 
To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = \nhttp endpoint for the guestbook application\n\n\n\n\n\n\nNote\n\n\nIt may take a few minutes for the guestbook service to launch and the load balancer to become available.\nDuring that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.\n\n\n\n\nWhat's Happening\n#\n\n\nTerraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment.\nTerraform also writes the state of your deployment to the \nterraform.tfstate\n file (creating a new one if it's not already there).\n\n\nCleanup\n#\n\n\nWhen you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application.\nExecute the following command (in the same directory):\n\n\nterraform destroy\n\n\nIt's also now safe to remove the \n.terraform/\n directory and the \n*.tfstate*\n files.", - "title": "Walkthrough: Deployment 3" - }, - { - "location": "/guides/walkthrough/deployment-3/#deployment-3-guestbook-redis-consul", - "text": "In Deployment 2 , we created two services in the same environment and linked them together manually.\nWhile that can work for a small system, it's not really feasible for a system with a lot of moving parts - we would need to look up load balancer endpoints for all of our services and manually link them all together.\nTo that end, here we're going to to redeploy our two-service system using Consul , a service discovery tool. For this deployment, we'll create a cluster of Consul servers which will be responsible for keeping track of the state of our system.\nWe'll also deploy new versions of the Guestbook and Redis task definition files - in addition to creating a container for its respective application, each task definition creates two other containers: a container for a Consul agent, which is in charge of communicating with the Consul server cluster a container for Registrator , which is charge of talking to the local Consul agent when a service comes up or goes down. You can choose to complete this section using either the Layer0 CLI or Terraform .", - "title": "Deployment 3: Guestbook + Redis + Consul" - }, - { - "location": "/guides/walkthrough/deployment-3/#deploy-with-layer0-cli", - "text": "If you're following along, you'll want to be working in the walkthrough/deployment-3/ directory of your clone of the guides repo. 
Files used in this deployment: Filename Purpose CLI.Consul.Dockerrun.aws.json Template for running a Consul server CLI.Guestbook.Dockerrun.aws.json Template for running the Guestbook application with Registrator and Consul agent CLI.Redis.Dockerrun.aws.json Template for running a Redis server with Registrator and Consul agent", - "title": "Deploy with Layer0 CLI" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-1-create-the-consul-load-balancer", - "text": "The Consul server cluster will live in the same environment as our Guestbook and Redis services - if you've completed Deployment 1 and Deployment 2 , this environment already exists as demo-env .\nWe'll start by creating a load balancer for the Consul cluster.\nThe load balancer will be private since only Layer0 services need to communicate with the Consul cluster.\nAt the command prompt, execute the following: l0 loadbalancer create --port 8500:8500/tcp --port 8301:8301/tcp --private --healthcheck-target tcp:8500 demo-env consul-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nconsull66b23 consul-lb consul-env 8500:8500/TCP false\n 8301:8301/TCP The following is a summary of the arguments passed in the above command: loadbalancer create : creates a new load balancer --port 8500:8500/tcp : instructs the load balancer to forward requests from port 8500 on the load balancer to port 8500 in the EC2 instance using the TCP protocol --port 8301:8301/tcp : instructs the load balancer to forward requests from port 8301 on the load balancer to port 8301 in the EC2 instance using the TCP protocol --private : instructs the load balancer to ignore outside traffic --healthcheck-target : instructs the load balancer to use a TCP ping on port 8500 as the basis for deciding whether the service is healthy demo-env : the name of the environment in which the load balancer is being created consul-lb : a name for the load balancer itself While we're touching on the Consul load balancer, we should grab its URL - this is the one value that we'll need to know in order to deploy the rest of our system, no matter how large it may get.\nAt the command prompt, execute the following: l0 loadbalancer get consul-lb We should see output that looks like the output we just received above after creating the load balancer, but this time there is something in the URL column.\nThat URL is the value we're looking for.\nMake note of it for when we reference it later.", - "title": "Part 1: Create the Consul Load Balancer" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-2-deploy-the-consul-task-definition", - "text": "Before we can create the deploy, we need to supply the URL of the Consul load balancer that we got in Part 1.\nIn CLI.Consul.Dockerrun.aws.json , find the entry in the environment block that looks like this: {\n name : CONSUL_SERVER_URL ,\n value : \n} Update the \"value\" with the Consul load balancer's URL into and save the file.\nWe can then create the deploy.\nAt the command prompt, execute the following: l0 deploy create CLI.Consul.Dockerrun.aws.json consul-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nconsul-dpl.1 consul-dpl 1 The following is a summary of the arguments passed in the above command: deploy create : creates a new Layer0 Deploy and allows you to specifiy an ECS task definition CLI.Consul.Dockerrun.aws.json : the file name of the ECS task definition (use the full path of the file if it is not in the current working 
directory) consul-dpl : a name for the deploy, which will later be used in creating the service", - "title": "Part 2: Deploy the Consul Task Definition" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-3-create-the-consul-service", - "text": "Here, we pull the previous resources together to create a service.\nAt the command prompt, execute the following: l0 service create --wait --loadbalancer demo-env:consul-lb demo-env consul-svc consul-dpl:latest We should see output like the following: Waiting for Deployment...\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nconsuls2f3c6 consul-svc demo-env consul-lb consul-dpl:1 1/1 The following is a summary of the arguments passed in the above commands: service create : creates a new Layer0 Service --wait : instructs the CLI to keep hold of the shell until the service has been successfully deployed --loadbalancer demo-env:consul-lb : the fully-qualified name of the load balancer behind which the service should live; in this case, the load balancer named consul-lb in the environment named demo-env demo-env : the name of the environment in which the service is to reside consul-svc : a name for the service itself consul-dpl:latest : the name and version of the deploy that the service should put into action Once the service has finished being deployed (and --wait has returned our shell to us), we need to scale the service. Currently, we only have one Consul server running in the cluster.\nFor best use, we should have at least 3 servers running (see this link for more details on Consul servers and their concensus protocol).\nIndeed, if we inspect the command block of the task definition file, we can find the following parameter: -bootstrap-expect=3 .\nThis tells the Consul server that we have just deployed that it should be expecting a total of three servers.\nWe still need to fulfill that expectation, so we'll scale our service up to three.\nAt the command prompt, execute the following: l0 service scale --wait consul-svc 3 We should see output like the following: Waiting for Deployment...\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nconsuls2f3c6 consul-svc demo-env consul-lb consul-dpl:1 3/3 Important! 
The successful completion of the scale command doesn't mean that we're ready to move on just yet!\nWe need to check in on the logs ( l0 service logs consul-svc ) until we can confirm that all three of the Consul servers have synced up with each other.\nEach consul-server section in the logs should be ending with consul: Adding LAN server [ip address] or agent: Join completed .\nIf you see one of the sections ending with agent: Join failed, retrying in 30s , you need to wait for that server to join the cluster before continuing.", - "title": "Part 3: Create the Consul Service" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-4-update-and-redeploy-the-redis-and-guestbook-applications", - "text": "We're going to need the URL of the Consul load balancer again.\nIn each of the CLI.Redis and CLI.Guestbook task definition files, look for the CONSUL_SERVER_URL block in the consul-agent container and populate the value field with the Consul load balancer's URL, then save the file.\nAt the command prompt, execute the two following commands to create new versions of the deploys for the Redis and Guestbook applications: l0 deploy create CLI.Redis.Dockerrun.aws.json redis-dpl l0 deploy create CLI.Guestbook.Dockerrun.aws.json guestbook-dpl Then, execute the two following commands to redeploy the existing Redis and Guestbook services using those new deploys: l0 service update --wait redis-svc redis-dpl:latest l0 service update --wait guestbook-svc guestbook-dpl:latest Note Here, we should run l0 service logs consul-svc again and confirm that the Consul cluster has discovered these two services. We can use l0 loadbalancer get guestbook-lb to obtain the guestbook application's URL, and then navigate to it with a web browser.\nOur guestbook app should be up and running - this time, it's been deployed without needing to know the address of the Redis backend! Of course, this is a simple example; in both this deployment and Deployment 2 , we needed to use l0 loadbalancer get to obtain the URL of a load balancer.\nHowever, in a system with many services that uses Consul like this example, we only ever need to find the URL of the Consul cluster - not the URLs of every service that needs to talk to another of our services.", - "title": "Part 4: Update and Redeploy the Redis and Guestbook Applications" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-5-inspect-the-consul-universe-optional", - "text": "Let's take a glimpse into how this system that we've deployed works. 
This requires that we have access to the key pair we've told Layer0 about when we set it up .", - "title": "Part 5: Inspect the Consul Universe (Optional)" - }, - { - "location": "/guides/walkthrough/deployment-3/#open-ports-for-ssh", - "text": "We want to SSH into the Guestbook EC2 instance, which means that we need to tell the Guestbook load balancer to allow SSH traffic through.\nAt the command prompt, execute the following: l0 loadbalancer addport guestbook-lb 22:22/tcp We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true url \n 22:22/TCP We need to take note of the load balancer's URL here, too.", - "title": "Open Ports for SSH" - }, - { - "location": "/guides/walkthrough/deployment-3/#ssh-into-the-instance", - "text": "At the command prompt, execute the following: ssh -i /path/to/keypair ec2-user@ guestbook_load_balancer_url -o ServerAliveInterval=30 (We'll probably be asked if we want to continue connecting - we do, so we'll enter yes .) Summary of arguments passed into the above command: -i /path/to/keypair : this allows us to specify an identity file for use when connecting to the remote machine - in this case, we want to replace /path/to/keypair with the actual path to the keypair we created when we set up Layer0 ec2-user@ guestbook_load_balancer_url : the address (here we want to replace guestbook_load_balancer_url with the actual URL of the guestbook load balancer) of the machine to which we want to connect and the name of the user ( ec2-user ) that we'd like to connect as -o : allows us to set parameters on the ssh command ServerAliveInterval=30 : one of those ssh parameters - AWS imposes an automatic disconnect if a connection is not active for a certain amount of time, so we use this option to ping every 30 seconds to prevent that automatic disconnect", - "title": "SSH Into the Instance" - }, - { - "location": "/guides/walkthrough/deployment-3/#look-around-you", - "text": "We're now inside of the EC2 instance!\nIf we run docker ps , we should see that our three Docker containers (the Guestbook app, a Consul agent, and Registrator) are up and running, as well as an amazon-ecs-agent image.\nBut that's not the Consul universe that we came here to see.\nAt the EC2 instance's command prompt, execute the following: echo $(curl -s localhost:8500/v1/catalog/services) | jq '.' We should see output like the following: {\n consul : [],\n consul-8301 : [\n udp \n ],\n consul-8500 : [],\n consul-8600 : [\n udp \n ],\n guestbook-redis : [],\n redis : []\n} Summary of commands passed in the above command: curl -s localhost:8500/v1/catalog/services : use curl to send a GET request to the specified URL, where localhost:8500 is an HTTP connection to the local Consul agent in this EC2 instance (the -s flag just silences excess output from curl ) | jq '.' : use a pipe ( | ) to take whatever returns from the left side of the pipe and pass it to the jq program, which we use here simply to pretty-print the JSON response echo $(...) 
: print out whatever returns from running the stuff inside of the parens; not necessary, but it gives us a nice newline after we get our response In that output, we can see all of the things that our local Consul agent knows about.\nIn addition to a few connections to the Consul server cluster, we can see that it knows about the Guestbook application running in this EC2 instance, as well as the Redis application running in a different instance with its own Consul agent and Registrator. Let's take a closer look at the Redis service and see how our Guestbook application is locating our Redis application.\nAt the EC2 instance's command prompt, execute the following: echo $(curl -s http://localhost:8500/v1/catalog/service/redis) | jq '.' We should see output like the following: [\n {\n ID : b4bb81e6-fe6a-c630-2553-7f6492ae5275 ,\n Node : ip-10-100-230-97.us-west-2.compute.internal ,\n Address : 10.100.230.97 ,\n Datacenter : dc1 ,\n TaggedAddresses : {\n lan : 10.100.230.97 ,\n wan : 10.100.230.97 \n },\n NodeMeta : {},\n ServiceID : 562aceee6935:ecs-l0-tlakedev-redis-dpl-20-redis-e0f989e5af97cdfd0e00:6379 ,\n ServiceName : redis ,\n ServiceTags : [],\n ServiceAddress : 10.100.230.97 ,\n ServicePort : 6379,\n ServiceEnableTagOverride : false,\n CreateIndex : 761,\n ModifyIndex : 761\n }\n] To really see how the Guestbook application connects to Redis, we can take an even closer look! Run docker ps to generate a listing of all the containers that Docker is running on the EC2 instance, and note the Container ID for the Guestbook container. Then run the following command to connect to the Guestbook container: docker exec -it [container_id] /bin/sh Once we've gotten inside the container, we'll run a similar command to the previous curl : curl -s consul-agent:8500/v1/catalog/service/redis Our Guestbook application makes a call like this one and figures out how to connect to the Redis service by mushing together the information from the ServiceAddress and ServicePort fields! To close the ssh connection to the EC2 instance, run exit in the command prompt.", - "title": "Look Around You" - }, - { - "location": "/guides/walkthrough/deployment-3/#cleanup", - "text": "When you're finished with the example, we can instruct Layer0 to terminate the applications and delete the environment. l0 environment delete demo-env", - "title": "Cleanup" - }, - { - "location": "/guides/walkthrough/deployment-3/#deploy-with-terraform", - "text": "As before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI.\nAs before, we will assume that you've cloned the guides repo and are working in the iterative-walkthrough/deployment-3/ directory. 
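For Terraform to talk to your Layer0 instance at all, main.tf also needs a layer0 provider block. A minimal sketch is shown below; the assumption here is that the endpoint and token are supplied as variables (the same TF_VAR_endpoint / TF_VAR_token convention used in the Terraform guides later in this document). The main.tf in the guides repo already wires these values up, so this is shown only to illustrate where they end up.

provider "layer0" {
  endpoint = "${var.endpoint}"
  token    = "${var.token}"
}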
We'll use these files to manage our deployment with Terraform: Filename Purpose Guestbook.Dockerrun.aws.json Template for running the Guestbook application main.tf Provisions resources; populates variables in template files outputs.tf Values that Terraform will yield during deployment Redis.Dockerrun.aws.json Template for running the Redis application terraform.tfstate Tracks status of deployment (created and managed by Terraform) terraform.tfvars Variables specific to the environment and application(s) variables.tf Values that Terraform will use during deployment", - "title": "Deploy with Terraform" - }, - { - "location": "/guides/walkthrough/deployment-3/#tf-a-brief-aside-revisited-redux", - "text": "In looking at main.tf , you can see that we're pulling in a Consul module that we maintain (here's the repo ); this removes the need for a local task definition file. We also are continuing to use modules for Redis and Guestbook.\nHowever, instead of just sourcing the module and passing in a value or two, you can see that we actually create new deploys from local task definition files and pass those deploys in to the module.\nThis design allows us to use pre-made modules while also offering a great deal of flexibility.\nIf you'd like to follow along the Redis deployment logic chain (the other applications/services work similarly), it goes something like this: main.tf creates a deploy for the Redis server by rendering a local task definition and populating it with certain values main.tf passes the ID of the deploy into the Redis module, along with other values the module requires the Redis module pulls all the variables it knows about (both the defaults in variables.tf as well as the ones passed in) among other Layer0/AWS resources, the module spins up a Redis service; since a deploy ID has been provided, it uses that deploy to create the service instead of a deploy made from a default task definition contained within the module", - "title": "*.tf: A Brief Aside: Revisited: Redux" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-1-terraform-get", - "text": "Run terraform get to pull down all the source materials Terraform needs for our deployment.", - "title": "Part 1: Terraform Get" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-2-terraform-plan", - "text": "As before, we can run terraform plan to see what's going to happen.\nWe should see that there are 12 new resources to be created: the environment the two local deploys which will be used for Guestbook and Redis the load balancer, deploy, and service from each of the Consul, Guestbook, and Redis modules note that even though the default modules' deploys are created, they won't actually be used to deploy services", - "title": "Part 2: Terraform Plan" - }, - { - "location": "/guides/walkthrough/deployment-3/#part-3-terraform-apply", - "text": "Run terraform apply , and we should see output similar to the following: data.template_file.consul: Refreshing state...\nlayer0_deploy.consul-dpl: Creating...\n\n...\n...\n...\n\nlayer0_service.guestbook-svc: Creation complete\n\nApply complete! Resources: 10 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. 
To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = http endpoint for the guestbook application Note It may take a few minutes for the guestbook service to launch and the load balancer to become available.\nDuring that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.", - "title": "Part 3: Terraform Apply" - }, - { - "location": "/guides/walkthrough/deployment-3/#whats-happening", - "text": "Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment.\nTerraform also writes the state of your deployment to the terraform.tfstate file (creating a new one if it's not already there).", - "title": "What's Happening" - }, - { - "location": "/guides/walkthrough/deployment-3/#cleanup_1", - "text": "When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application.\nExecute the following command (in the same directory): terraform destroy It's also now safe to remove the .terraform/ directory and the *.tfstate* files.", - "title": "Cleanup" - }, - { - "location": "/guides/terraform_beyond_layer0/", - "text": "Deployment guide: Terraform beyond Layer0\n#\n\n\nIn this example, we'll learn how you can use Terraform to create a Layer0 service as well as a persistent data store. The main goal of this example is to explore how you can combine Layer0 with other Terraform providers and best practices.\n\n\nBefore you start\n#\n\n\nTo complete the procedures in this section, you must have the following installed and configured correctly:\n\n\n\n\nLayer0 v0.8.4 or later\n\n\nTerraform v0.9.0 or later\n\n\nLayer0 Terraform Provider\n\n\n\n\nIf you have not already configured Layer0, see the \nLayer0 installation guide\n. If you are running an older version of Layer0, see the \nLayer0 upgrade instructions\n.\n\n\nSee the \nTerraform installation guide\n to install Terraform and the Layer0 Terraform Plugin.\n\n\n\n\nDeploy with Terraform\n#\n\n\nUsing Terraform, you will deploy a simple guestbook application backed by AWS DynamoDB Table. The terraform configuration file will use both the Layer0 and AWS Terraform providers, to deploy the guestbook application and provision a new DynamoDB Table.\n\n\nPart 1: Clone the guides repository\n#\n\n\nRun this command to clone the \nquintilesims/guides\n repository:\n\n\ngit clone https://github.com/quintilesims/guides.git\n\n\nOnce you have cloned the repository, navigate to the \nguides/terraform-beyond-layer0/example-1\n folder for the rest of this example.\n\n\nPart 2: Terraform Plan\n#\n\n\n\n\nNote\n\n\nAs we're using modules in our Terraform configuration, we need to run \nterraform get\n command before performing other terraform operations. Running \nterraform get\n will download the modules to your local folder named \n.terraform\n. See here for more information on \nterraform get\n.\n\n\nterraform get\n\n\nGet: file:///Users/\n/go/src/github.com/quintilesims/guides/terraform-beyond-layer0/example-1/modules/guestbook_service\n\n\n\n\nBefore deploying, we can run the following command to see what changes Terraform will make to your infrastructure should you go ahead and apply. If you had any errors in your layer0.tf file, running \nterraform plan\n would output those errors so that you can address them. 
Also, Terraform will prompt you for configuration values that it does not have.\n\n\n\n\nTip\n\n\nThere are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the \nterraform.tfvars\n file, or exporting environment variables like \nTF_VAR_endpoint\n and \nTF_VAR_token\n, for example). See the \nTerraform Docs\n for more.\n\n\n\n\nterraform plan\n\n\nvar.endpoint\n Enter a value: \nenter your Layer0 endpoint\n\n\nvar.token\n Enter a value: \nenter your Layer0 token\n\n...\n+ aws_DynamoDB_table.guestbook\n arn: \ncomputed\n\n attribute.#: \n1\n\n attribute.4228504427.name: \nid\n\n attribute.4228504427.type: \nS\n\n hash_key: \nid\n\n name: \nguestbook\n\n read_capacity: \n20\n\n stream_arn: \ncomputed\n\n stream_enabled: \ncomputed\n\n stream_view_type: \ncomputed\n\n write_capacity: \n20\n\n\n...\n\n\n\n\nPart 3: Terraform Apply\n#\n\n\nRun the following command to begin the deploy process.\n\n\nterraform apply\n\n\nlayer0_environment.demo: Refreshing state...\n...\n...\n...\nlayer0_service.guestbook: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = \nhttp endpoint for the sample application\n\n\n\n\n\n\nNote\n\n\nIt may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time, you may get HTTP 503 errors when making HTTP requests against the load balancer URL.\n\n\n\n\nTerraform will set up the entire environment for you and then output a link to the application's load balancer.\n\n\nWhat's happening\n#\n\n\nTerraform using the \nAWS provider\n, provisions a new DynamoDB table. It also uses the \nLayer0 provider\n to provision the environment, deploy, load balancer and service required to run the entire guestbook application.\n\n\nLooking at an excerpt of the file \n./terraform-beyond-layer0/example-1/modules/guestbook_service/main.tf\n, we can see the following definitions:\n\n\nresource \naws_dynamodb_table\n \nguestbook\n {\n name = \n${var.table_name}\n\n read_capacity = 20\n write_capacity = 20\n hash_key = \nid\n\n\n attribute {\n name = \nid\n\n type = \nS\n\n }\n}\n\nresource \nlayer0_deploy\n \nguestbook\n {\n name = \nguestbook\n\n content = \n${data.template_file.guestbook.rendered}\n\n}\n\ndata \ntemplate_file\n \nguestbook\n {\n template = \n${file(\nDockerrun.aws.json\n)}\n\n\n vars {\n access_key = \n${var.access_key}\n\n secret_key = \n${var.secret_key}\n\n region = \n${var.region}\n\n table_name = \n${aws_dynamodb_table.guestbook.name}\n\n }\n}\n\n\n\n\nNote the resource definitions for \naws_dynamodb_table\n and \nlayer0_deploy\n. To configure the guestbook application to use the provisioned DynamoDB table, we reference the \nname\n property from the DynamoDB definition \ntable_name = \"${aws_dynamodb_table.guestbook.name}\"\n. \n\n\nThese \nvars\n are used to populate the template fields in our \nDockerrun.aws.json\n file. 
\n\n\n{\n \nAWSEBDockerrunVersion\n: 2,\n \ncontainerDefinitions\n: [\n {\n \nname\n: \nguestbook\n,\n \nimage\n: \nquintilesims/guestbook-db\n,\n \nessential\n: true,\n \nmemory\n: 128,\n \nenvironment\n: [\n {\n \nname\n: \nDYNAMO_TABLE\n,\n \nvalue\n: \n${table_name}\n\n }\n ...\n\n\n\n\nThe Layer0 configuration referencing the AWS DynamoDB configuration \ntable_name = \"${aws_DynamoDB_table.guestbook.name}\"\n, infers an implicit dependency. Before Terraform creates the infrastructure, it will use this information to order the resource creation and create resources in parallel, where there are no dependencies. In this example, the AWS DynamoDB table will be created before the Layer0 deploy. See \nTerraform Resource Dependencies\n for more information.\n\n\nPart 4: Scaling a Layer0 Service\n#\n\n\nThe workflow to make changes to your infrastructure generally involves updating your Terraform configuration file followed by a \nterraform plan\n and \nterraform apply\n.\n\n\nUpdate the Terraform configuration\n#\n\n\nOpen the file \n./example-1/modules/guestbook_service/main.tf\n in a text editor and make the change to add a \nscale\n property with a value of \n3\n to the \nlayer0_service\n section. For more information about the \nscale\n property, see \nLayer0 Terraform Plugin\n documentation. The result should look like the below:\n\n\nexample-1/modules/guestbook_service/main.tf\n\n\n# Create a service named \nguestbook\n\nresource \nlayer0_service\n \nguestbook\n {\n name = \nguestbook\n\n environment = \n${layer0_environment.demo.id}\n\n deploy = \n${layer0_deploy.guestbook.id}\n\n load_balancer = \n${layer0_load_balancer.guestbook.id}\n\n scale = 3\n}\n\n\n\n\nPlan and Apply\n#\n\n\nExecute the \nterraform plan\n command to understand the changes that you will be making. Note that if you did not specify \nscale\n, it defaults to '1'.\n\n\nterraform plan\n\n\nOutputs:\n\n\n...\n\n~ module.guestbook.layer0_service.guestbook\n scale: \n1\n =\n \n3\n\n\n\n\nNow run the following command to deploy your changes:\n\n\nterraform apply\n\n\nOutputs:\n\n\nlayer0_environment.demo: Refreshing state... (ID: demoenvbb9f6)\ndata.template_file.guestbook: Refreshing state...\nlayer0_deploy.guestbook: Refreshing state... (ID: guestbook.6)\nlayer0_load_balancer.guestbook: Refreshing state... (ID: guestbo43ab0)\nlayer0_service.guestbook: Refreshing state... (ID: guestboebca1)\nlayer0_service.guestbook: Modifying... (ID: guestboebca1)\n scale: \n1\n =\n \n3\n\nlayer0_service.guestbook: Modifications complete (ID: guestboebca1)\n\nApply complete! Resources: 0 added, 1 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: \n\nOutputs:\n\nservices = \nguestbook_service_url\n\n\n\n\nTo confirm your service has been updated to the desired scale, you can run the following layer0 command. 
Note that the desired scale for the guestbook service should be eventually be 3/3.\n\n\nl0 service get guestbook1_guestbook_svc\n\nOutputs:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo4fd3b guestbook1_guestbook_svc demo guestbook1_guestbook_lb guestbook1_guestbook_dpl:3* 1/3 (2)\n\n\n\n\nAs scale is a parameter we are likely to change in the future, rather than hardcoding it to 3 as we have done just now, it would be better to use a variable to store \nservice_scale\n. The following Best Practices sections will show how you can achieve this.\n\n\n\n\nBest Practices with Terraform + Layer0\n\n\nThe following sections outline some of the best practices and tips to take into consideration, when using Layer0 with Terraform.\n\n\n\n\nPart 5: Terraform Remote State\n#\n\n\nTerraform stores the state of the deployed infrastructure in a local file named \nterraform.tfstate\n by default. To find out more about why Terraform needs to store state, see \nPurpose of Terraform State\n. \n\n\nHow state is loaded and used for operations such as \nterraform apply\n is determined by a \nBackend\n. As mentioned, by default the state is stored locally which is enabled by a \"local\" backend.\n\n\nRemote State\n#\n\n\nBy default, Terraform stores state locally but it can also be configured to store state in a remote backend. This can prove useful when you are working as part of a team to provision and manage services deployed by Terraform. All the members of the team will need access to the state file to apply new changes and be able to do so without overwriting each others' changes. See here for more information on the different \nbackend types\n supported by Terraform.\n\n\nTo configure a remote backend, append the \nterraform\n section below to your terraform file \n./example-1/main.tf\n. Populate the \nbucket\n property to an existing s3 bucket.\n\n\n\n\nTip\n\n\nIf you have been following along with the guide, \n./example-1/main.tf\n should already have the below section commented out. You can uncomment the \nterraform\n section and populate the bucket property with an appropriate value.\n\n\n\n\nterraform {\n backend \ns3\n {\n bucket = \nmy-bucket-name\n\n key = \ndemo-env/remote-backend/terraform.tfstate\n\n region = \nus-west-2\n\n }\n}\n\n\n\n\nOnce you have modified \nmain.tf\n, you will need to initialize the newly configured backend by running the following command.\n\n\nterraform init\n\n\nOutputs:\n\n\nInitializing the backend...\n\nDo you want to copy state from \nlocal\n to \nconsul\n?\n ...\n Do you want to copy the state from \nlocal\n to \nconsul\n? Enter \nyes\n to copy\n and \nno\n to start with the existing state in \nconsul\n.\n\n Enter a value: \n\n\n\n\nGo ahead and enter: \nyes\n.\n\n\nSuccessfully configured the backend \nconsul\n! Terraform will automatically\nuse this backend unless the backend configuration changes.\n\nTerraform has been successfully initialized!\n...\n\n\n\n\nWhat's happening\n#\n\n\nAs you are configuring a backend for the first time, Terraform will give you an option to migrate your state to the new backend. From now on, any further changes to your infrastructure made by Terraform will result in the remote state file being updated. 
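The Locking and Security sections below each add one option to the backend block configured above. As a minimal sketch, assuming a hypothetical lock table named terraform-lock (the bucket, key, and region values are the ones used above), the combined configuration would look roughly like this; note that the lock-table option is spelled lock_table in Terraform 0.9's S3 backend and dynamodb_table in newer releases.

terraform {
  backend "s3" {
    bucket     = "my-bucket-name"
    key        = "demo-env/remote-backend/terraform.tfstate"
    region     = "us-west-2"
    encrypt    = true
    lock_table = "terraform-lock"
  }
}

The referenced DynamoDB table must already exist and, as described in the Locking section below, needs a primary key named LockID of type String.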
For more information see \nTerraform backends\n.\n\n\nA new team member can use the \nmain.tf\n from their own machine without obtaining a copy of the state file \nterraform.tfstate\n as the configuration will retrieve the state file from the remote backend.\n\n\nLocking\n#\n\n\nNot all remote backends support locking (locking ensures only one person is able to change the state at a time). The \nS3\n backend we used earlier in the example supports locking which is disabled by default. The \nS3\n backend uses a DynamoDB table to acquire a lock before making a change to the state file. To enable locking, you need to specify \nlocking_table\n property with the name of an existing DynamoDB table. The DynamoDB table also needs primary key named \nLockID\n of type \nString\n.\n\n\nSecurity\n#\n\n\nA Terraform state file is written in plain text. This can lead to a situation where deploying resources that require sensitive data can result in the sensitive data being stored in the state file. To minimize exposure of sensitive data, you can enable \nserver side encryption\n of the state file by adding property \nencrypt\n set to \ntrue\n.\n\n\nThis will ensure that the file is encrypted in S3 and by using a remote backend, you will also have the added benefit of the state file not being persisted to disk locally as it will only ever be held in memory by Terraform.\n\n\nFor securing the state file further, you can also enable access logging on the S3 bucket you are using for the remote backend, which can help track down invalid access should it occur.\n\n\nPart 6: Terraform Configuration Structure\n#\n\n\nWhile there are many different approaches to organizing your Terraform code, we suggest using the following file structure:\n\n\nexample1/ # contains overarching Terraform deployment, pulls in any modules that might exist\n \u2500 main.tf \n \u2500 variables.tf \n \u2500 output.tf \n + modules/ # if you can break up deployment into smaller modules, keep the modules in here\n + guestbook_service/ # contains Terraform configuration for a module\n \u2500 main.tf \n \u2500 variables.tf \n \u2500 output.tf\n + service2/ # contains another module\n + service3/ # contains another module\n\n\n\n\nHere we are making use of Terraform \nModules\n. Modules in Terraform are self-contained packages of Terraform configurations, that are managed as a group. Modules are used to create reusable components in Terraform as well as for basic code organization. In this example, we are using modules to separate each service and making it consumable as a module.\n\n\nIf you wanted to add a new service, you can create a new service folder inside the ./modules. If you wanted to you could even run multiple copies of the same service. See here for more information about \nCreating Modules\n.\n\n\nAlso see the below repositories for ideas on different ways you can organize your Terraform configuration files for the needs of your specific project: \n\n\n\n\nTerraform Community Modules\n\n\nBest Pratices Ops\n\n\n\n\nPart 7: State Environments\n#\n\n\nLayer0 recommends that you typically make a single environment for each tier of your application, such as \ndev\n, \nstaging\n and \nproduction\n. That recommendation still holds when using Terraform with Layer0. Using Layer0 CLI, you can target a specific environment for most CLI commands. This enables you to service each tier relatively easily. 
In Terraform, there are a few approaches you can take to enable a similar workflow.\n\n\nSingle Terraform Configuration\n#\n\n\nYou can use a single Terraform configuration to create and maintain multiple environments by making use of the \nCount\n parameter inside a Resource. Count enables you to create multiple copies of a given resource. \n\n\nFor example\n\n\nvariable \nenvironments\n {\n type = \nlist\n\n\n default = [\n \ndev\n,\n \nstaging\n,\n \nproduction\n\n ]\n}\n\nresource \nlayer0_environment\n \ndemo\n {\n count = \n${length(var.environments)}\n\n\n name = \n${var.environments[count.index]}_demo\n\n}\n\n\n\n\nLet's have a more in-depth look at how this works. You can start by navigating to the \n./terraform-beyond-layer0/example-2\n folder. Start by running the plan command.\n\n\nterraform plan\n\n\nOutputs:\n\n\n+ module.environment.aws_dynamodb_table.guestbook.0\n ...\n name: \ndev_guestbook\n\n...\n+ module.environment.aws_dynamodb_table.guestbook.1\n ..\n name: \nstaging_guestbook\n\n...\n\n\n\n\nNote that you will see a copy of each resource for each environment specified in your environments file in \n./example-2/variables.tf\n. Go ahead and run apply.\n\n\nterraform apply\n\n\nOutputs:\n\n\nApply complete! Resources: 10 added, 0 changed, 0 destroyed.\n\nOutputs:\n\nguestbook_urls = \n\ndev_url\n\n\nstaging_url\n\n\n\n\nYou have now created two separate environments using a single terraform configuration: dev and staging. You can navigate to both URLs in the output and you should note that they are separate instances of the guestbook application backed by their own separate data stores.\n\n\nA common use case for maintaining different environments is to configure each environment slightly differently. For example, you might want to scale your Layer0 service to 3 for staging and leave it as 1 for the dev environment. This can be done easily by using a map lookup to set our \nscale\n parameter in the layer0 service configuration in \n./example-2/main.tf\n. Go ahead and open \nmain.tf\n in a text editor. Navigate to the \nlayer0_service guestbook\n section. Uncomment the scale parameter so that your configuration looks like the one below.\n\n\nresource \nlayer0_service\n \nguestbook\n {\n count = \n${length(var.environments)}\n\n\n name = \n${element(layer0_environment.demo.*.name, count.index)}_guestbook_svc\n\n environment = \n${element(layer0_environment.demo.*.id, count.index)}\n\n deploy = \n${element(layer0_deploy.guestbook.*.id, count.index)}\n\n load_balancer = \n${element(layer0_load_balancer.guestbook.*.id, count.index)}\n\n scale = \n${lookup(var.service_scale, var.environments[count.index], \n1\n)}\n\n}\n\n\n\n\nThe variable \nservice_scale\n is already defined in \nvariables.tf\n. If you now go ahead and run plan, you will see that the \nguestbook\n service for only the \nstaging\n environment will be scaled up.\n\n\nterraform plan\n\n\nOutputs:\n\n\n~ layer0_service.guestbook.1\n scale: \n1\n =\n \n3\n\n\n\n\nA potential downside of this approach, however, is that all your environments are using the same state file. Sharing a state file breaks some of the resource encapsulation between environments. Should there ever be a situation where your state file becomes corrupt, it would affect your ability to service all the environments until you resolve the issue by potentially rolling back to a previous copy of the state file. 
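The lookup() interpolation above reads from a map keyed by environment name. The guide does not reproduce that map here, but based on the behaviour it describes (staging scaled to 3, everything else falling back to the default of 1 passed as the third argument to lookup()), a sketch of the service_scale variable in ./example-2/variables.tf would look roughly like this:

variable "service_scale" {
  type = "map"

  default = {
    staging = 3
  }
}

Any environment missing from the map, such as dev, picks up the fallback value of 1.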
\n\n\nThe next section will show you how you can separate your Terraform environment configuration such that each environment will have its own state file.\n\n\n\n\nNote\n\n\nAs previously mentioned, you will want to avoid hardcoding resource parameter configuration values as much as possible. One example is the scale property of a layer0 service, but this extends to other properties as well, such as the Docker image version. You should avoid using \nlatest\n and specify an explicit version via a configurable variable when possible.\n\n\n\n\nMultiple Terraform Configurations\n#\n\n\nThe previous example used a single set of Terraform Configuration files to create and maintain multiple environments. This resulted in a single state file which had the state information for all the environments. To avoid all environments sharing a single state file, you can split your Terraform configuration so that you have a state file for each environment.\n\n\nGo ahead and navigate to the \n./terraform-beyond-layer0/example-3\n folder. Here we are using a folder to separate each environment. So \nenv-dev\n and \nenv-staging\n represent a \ndev\n and \nstaging\n environment. To work with either of the environments, you will need to navigate into the desired environment's folder and run Terraform commands. This will ensure that each environment will have its own state file.\n\n\nOpen the env-dev folder in a text editor. Note that \nmain.tf\n doesn't contain any resource definitions. Instead, we only have one module definition which has various variables being passed in, which is also how we are passing in the \nenvironment\n variable. To create \ndev\n and \nstaging\n environments for our guestbook application, go ahead and run the terraform plan and apply commands from the \nenv-dev\n and \nenv-staging\n folders.\n\n\n# assuming you are in the terraform-beyond-layer0/example-3 folder\ncd env-dev\nterraform get\nterraform plan\nterraform apply\n\ncd ../env-staging\nterraform get\nterraform plan\nterraform apply\n\n\n\n\nYou should now have two instances of the guestbook application running. Note that our guestbook service in our staging environment has been scaled to 3. We have done this by specifying a map variable \nservice_scale\n in \n./example-3/dev-staging/variables.tf\n which can have different scale values for each environment.\n\n\nPart 8: Multiple Provider Instances\n#\n\n\nYou can define multiple instances of the same provider, each uniquely customized. For example, you can have an \naws\n provider to support multiple regions or different roles, or, in the case of the \nlayer0\n provider, to support multiple layer0 endpoints.\n\n\nFor example:\n\n\n# aws provider\nprovider \naws\n {\n alias = \neast\n\n region = \nus-east-1\n\n # ...\n}\n\n# aws provider configured to a west region\nprovider \naws\n {\n alias = \nwest\n\n region = \nus-west-1\n\n # ...\n}\n\n\n\n\nThis will now allow you to reference aws providers configured for different regions. You can do so by referencing the provider using the naming scheme \nTYPE.ALIAS\n, which in the above example results in \naws.west\n. 
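The same aliasing works for the layer0 provider when a single configuration needs to talk to more than one Layer0 endpoint. A minimal sketch, where the alias names and the per-endpoint variables are assumptions for illustration:

provider "layer0" {
  alias    = "dev"
  endpoint = "${var.dev_endpoint}"
  token    = "${var.dev_token}"
}

provider "layer0" {
  alias    = "prod"
  endpoint = "${var.prod_endpoint}"
  token    = "${var.prod_token}"
}

A layer0_* resource can then select one of these instances with provider = "layer0.prod", following the same TYPE.ALIAS naming scheme.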
See \nProvider Configuration\n for more information.\n\n\nresource \naws_instance\n \nfoo\n {\n provider = \naws.east\n\n # ...\n}\n\nresource \naws_instance\n \nbar\n {\n provider = \naws.west\n\n # ...\n}\n\n\n\n\nPart 9: Cleanup\n#\n\n\nWhen you're finished with the examples in this guide, run the following destroy command in each of the following directories to destroy the Layer0 environment, the application, and the DynamoDB table.\n\n\nDirectories: \n\n\n\n\n/example-1 \n\n\n/example-2 \n\n\n/example-3/env-dev \n\n\n/example-3/env-staging \n\n\n\n\nterraform destroy\n\n\n\n\nRemote Backend Resources\n\n\nIf you created additional resources (an S3 bucket and a DynamoDB table) separately when configuring a \nRemote Backend\n, do not forget to delete those if they are no longer needed. You should be able to look at your Terraform configuration file \nlayer0.tf\n to determine the name of the bucket and table.", - "title": "Terraform beyond Layer0" - }, - { - "location": "/guides/terraform_beyond_layer0/#deployment-guide-terraform-beyond-layer0", - "text": "In this example, we'll learn how you can use Terraform to create a Layer0 service as well as a persistent data store. The main goal of this example is to explore how you can combine Layer0 with other Terraform providers and best practices.", - "title": "Deployment guide: Terraform beyond Layer0" - }, - { - "location": "/guides/terraform_beyond_layer0/#before-you-start", - "text": "To complete the procedures in this section, you must have the following installed and configured correctly: Layer0 v0.8.4 or later Terraform v0.9.0 or later Layer0 Terraform Provider If you have not already configured Layer0, see the Layer0 installation guide . If you are running an older version of Layer0, see the Layer0 upgrade instructions . See the Terraform installation guide to install Terraform and the Layer0 Terraform Plugin.", - "title": "Before you start" - }, - { - "location": "/guides/terraform_beyond_layer0/#deploy-with-terraform", - "text": "Using Terraform, you will deploy a simple guestbook application backed by an AWS DynamoDB table. The Terraform configuration file will use both the Layer0 and AWS Terraform providers to deploy the guestbook application and provision a new DynamoDB table.", - "title": "Deploy with Terraform" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-1-clone-the-guides-repository", - "text": "Run this command to clone the quintilesims/guides repository: git clone https://github.com/quintilesims/guides.git Once you have cloned the repository, navigate to the guides/terraform-beyond-layer0/example-1 folder for the rest of this example.", - "title": "Part 1: Clone the guides repository" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-2-terraform-plan", - "text": "Note As we're using modules in our Terraform configuration, we need to run the terraform get command before performing other terraform operations. Running terraform get will download the modules to a local folder named .terraform . See here for more information on terraform get . terraform get Get: file:///Users/ /go/src/github.com/quintilesims/guides/terraform-beyond-layer0/example-1/modules/guestbook_service Before deploying, we can run the following command to see what changes Terraform will make to your infrastructure should you go ahead and apply. If you had any errors in your layer0.tf file, running terraform plan would output those errors so that you can address them. Also, Terraform will prompt you for configuration values that it does not have. 
Tip There are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the terraform.tfvars file, or exporting environment variables like TF_VAR_endpoint and TF_VAR_token , for example). See the Terraform Docs for more. terraform plan var.endpoint\n Enter a value: enter your Layer0 endpoint \n\nvar.token\n Enter a value: enter your Layer0 token \n...\n+ aws_DynamoDB_table.guestbook\n arn: computed \n attribute.#: 1 \n attribute.4228504427.name: id \n attribute.4228504427.type: S \n hash_key: id \n name: guestbook \n read_capacity: 20 \n stream_arn: computed \n stream_enabled: computed \n stream_view_type: computed \n write_capacity: 20 \n\n...", - "title": "Part 2: Terraform Plan" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-3-terraform-apply", - "text": "Run the following command to begin the deploy process. terraform apply layer0_environment.demo: Refreshing state...\n...\n...\n...\nlayer0_service.guestbook: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = http endpoint for the sample application Note It may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time, you may get HTTP 503 errors when making HTTP requests against the load balancer URL. Terraform will set up the entire environment for you and then output a link to the application's load balancer.", - "title": "Part 3: Terraform Apply" - }, - { - "location": "/guides/terraform_beyond_layer0/#whats-happening", - "text": "Terraform using the AWS provider , provisions a new DynamoDB table. It also uses the Layer0 provider to provision the environment, deploy, load balancer and service required to run the entire guestbook application. Looking at an excerpt of the file ./terraform-beyond-layer0/example-1/modules/guestbook_service/main.tf , we can see the following definitions: resource aws_dynamodb_table guestbook {\n name = ${var.table_name} \n read_capacity = 20\n write_capacity = 20\n hash_key = id \n\n attribute {\n name = id \n type = S \n }\n}\n\nresource layer0_deploy guestbook {\n name = guestbook \n content = ${data.template_file.guestbook.rendered} \n}\n\ndata template_file guestbook {\n template = ${file( Dockerrun.aws.json )} \n\n vars {\n access_key = ${var.access_key} \n secret_key = ${var.secret_key} \n region = ${var.region} \n table_name = ${aws_dynamodb_table.guestbook.name} \n }\n} Note the resource definitions for aws_dynamodb_table and layer0_deploy . To configure the guestbook application to use the provisioned DynamoDB table, we reference the name property from the DynamoDB definition table_name = \"${aws_dynamodb_table.guestbook.name}\" . These vars are used to populate the template fields in our Dockerrun.aws.json file. {\n AWSEBDockerrunVersion : 2,\n containerDefinitions : [\n {\n name : guestbook ,\n image : quintilesims/guestbook-db ,\n essential : true,\n memory : 128,\n environment : [\n {\n name : DYNAMO_TABLE ,\n value : ${table_name} \n }\n ... The Layer0 configuration referencing the AWS DynamoDB configuration table_name = \"${aws_DynamoDB_table.guestbook.name}\" , infers an implicit dependency. 
Before Terraform creates the infrastructure, it will use this information to order the resource creation and create resources in parallel, where there are no dependencies. In this example, the AWS DynamoDB table will be created before the Layer0 deploy. See Terraform Resource Dependencies for more information.", - "title": "What's happening" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-4-scaling-a-layer0-service", - "text": "The workflow to make changes to your infrastructure generally involves updating your Terraform configuration file followed by a terraform plan and terraform apply .", - "title": "Part 4: Scaling a Layer0 Service" - }, - { - "location": "/guides/terraform_beyond_layer0/#update-the-terraform-configuration", - "text": "Open the file ./example-1/modules/guestbook_service/main.tf in a text editor and make the change to add a scale property with a value of 3 to the layer0_service section. For more information about the scale property, see Layer0 Terraform Plugin documentation. The result should look like the below: example-1/modules/guestbook_service/main.tf # Create a service named guestbook \nresource layer0_service guestbook {\n name = guestbook \n environment = ${layer0_environment.demo.id} \n deploy = ${layer0_deploy.guestbook.id} \n load_balancer = ${layer0_load_balancer.guestbook.id} \n scale = 3\n}", - "title": "Update the Terraform configuration" - }, - { - "location": "/guides/terraform_beyond_layer0/#plan-and-apply", - "text": "Execute the terraform plan command to understand the changes that you will be making. Note that if you did not specify scale , it defaults to '1'. terraform plan Outputs: ...\n\n~ module.guestbook.layer0_service.guestbook\n scale: 1 = 3 Now run the following command to deploy your changes: terraform apply Outputs: layer0_environment.demo: Refreshing state... (ID: demoenvbb9f6)\ndata.template_file.guestbook: Refreshing state...\nlayer0_deploy.guestbook: Refreshing state... (ID: guestbook.6)\nlayer0_load_balancer.guestbook: Refreshing state... (ID: guestbo43ab0)\nlayer0_service.guestbook: Refreshing state... (ID: guestboebca1)\nlayer0_service.guestbook: Modifying... (ID: guestboebca1)\n scale: 1 = 3 \nlayer0_service.guestbook: Modifications complete (ID: guestboebca1)\n\nApply complete! Resources: 0 added, 1 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: \n\nOutputs:\n\nservices = guestbook_service_url To confirm your service has been updated to the desired scale, you can run the following layer0 command. Note that the desired scale for the guestbook service should be eventually be 3/3. l0 service get guestbook1_guestbook_svc \nOutputs: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo4fd3b guestbook1_guestbook_svc demo guestbook1_guestbook_lb guestbook1_guestbook_dpl:3* 1/3 (2) As scale is a parameter we are likely to change in the future, rather than hardcoding it to 3 as we have done just now, it would be better to use a variable to store service_scale . The following Best Practices sections will show how you can achieve this. 
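As a first step in that direction, the hardcoded value can be pulled out into a simple variable. The sketch below reuses the resource arguments shown above; the variable name follows the service_scale name used in the text, and its default of 1 is an assumption.

variable "service_scale" {
  default = 1
}

resource "layer0_service" "guestbook" {
  name          = "guestbook"
  environment   = "${layer0_environment.demo.id}"
  deploy        = "${layer0_deploy.guestbook.id}"
  load_balancer = "${layer0_load_balancer.guestbook.id}"
  scale         = "${var.service_scale}"
}

Part 7 later in this guide extends this idea to a map so that each environment can carry its own scale value.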
Best Practices with Terraform + Layer0 The following sections outline some of the best practices and tips to take into consideration, when using Layer0 with Terraform.", - "title": "Plan and Apply" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-5-terraform-remote-state", - "text": "Terraform stores the state of the deployed infrastructure in a local file named terraform.tfstate by default. To find out more about why Terraform needs to store state, see Purpose of Terraform State . How state is loaded and used for operations such as terraform apply is determined by a Backend . As mentioned, by default the state is stored locally which is enabled by a \"local\" backend.", - "title": "Part 5: Terraform Remote State" - }, - { - "location": "/guides/terraform_beyond_layer0/#remote-state", - "text": "By default, Terraform stores state locally but it can also be configured to store state in a remote backend. This can prove useful when you are working as part of a team to provision and manage services deployed by Terraform. All the members of the team will need access to the state file to apply new changes and be able to do so without overwriting each others' changes. See here for more information on the different backend types supported by Terraform. To configure a remote backend, append the terraform section below to your terraform file ./example-1/main.tf . Populate the bucket property to an existing s3 bucket. Tip If you have been following along with the guide, ./example-1/main.tf should already have the below section commented out. You can uncomment the terraform section and populate the bucket property with an appropriate value. terraform {\n backend s3 {\n bucket = my-bucket-name \n key = demo-env/remote-backend/terraform.tfstate \n region = us-west-2 \n }\n} Once you have modified main.tf , you will need to initialize the newly configured backend by running the following command. terraform init Outputs: Initializing the backend...\n\nDo you want to copy state from local to consul ?\n ...\n Do you want to copy the state from local to consul ? Enter yes to copy\n and no to start with the existing state in consul .\n\n Enter a value: Go ahead and enter: yes . Successfully configured the backend consul ! Terraform will automatically\nuse this backend unless the backend configuration changes.\n\nTerraform has been successfully initialized!\n...", - "title": "Remote State" - }, - { - "location": "/guides/terraform_beyond_layer0/#whats-happening_1", - "text": "As you are configuring a backend for the first time, Terraform will give you an option to migrate your state to the new backend. From now on, any further changes to your infrastructure made by Terraform will result in the remote state file being updated. For more information see Terraform backends . A new team member can use the main.tf from their own machine without obtaining a copy of the state file terraform.tfstate as the configuration will retrieve the state file from the remote backend.", - "title": "What's happening" - }, - { - "location": "/guides/terraform_beyond_layer0/#locking", - "text": "Not all remote backends support locking (locking ensures only one person is able to change the state at a time). The S3 backend we used earlier in the example supports locking which is disabled by default. The S3 backend uses a DynamoDB table to acquire a lock before making a change to the state file. To enable locking, you need to specify locking_table property with the name of an existing DynamoDB table. 
The DynamoDB table also needs primary key named LockID of type String .", - "title": "Locking" - }, - { - "location": "/guides/terraform_beyond_layer0/#security", - "text": "A Terraform state file is written in plain text. This can lead to a situation where deploying resources that require sensitive data can result in the sensitive data being stored in the state file. To minimize exposure of sensitive data, you can enable server side encryption of the state file by adding property encrypt set to true . This will ensure that the file is encrypted in S3 and by using a remote backend, you will also have the added benefit of the state file not being persisted to disk locally as it will only ever be held in memory by Terraform. For securing the state file further, you can also enable access logging on the S3 bucket you are using for the remote backend, which can help track down invalid access should it occur.", - "title": "Security" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-6-terraform-configuration-structure", - "text": "While there are many different approaches to organizing your Terraform code, we suggest using the following file structure: example1/ # contains overarching Terraform deployment, pulls in any modules that might exist\n \u2500 main.tf \n \u2500 variables.tf \n \u2500 output.tf \n + modules/ # if you can break up deployment into smaller modules, keep the modules in here\n + guestbook_service/ # contains Terraform configuration for a module\n \u2500 main.tf \n \u2500 variables.tf \n \u2500 output.tf\n + service2/ # contains another module\n + service3/ # contains another module Here we are making use of Terraform Modules . Modules in Terraform are self-contained packages of Terraform configurations, that are managed as a group. Modules are used to create reusable components in Terraform as well as for basic code organization. In this example, we are using modules to separate each service and making it consumable as a module. If you wanted to add a new service, you can create a new service folder inside the ./modules. If you wanted to you could even run multiple copies of the same service. See here for more information about Creating Modules . Also see the below repositories for ideas on different ways you can organize your Terraform configuration files for the needs of your specific project: Terraform Community Modules Best Pratices Ops", - "title": "Part 6: Terraform Configuration Structure" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-7-state-environments", - "text": "Layer0 recommends that you typically make a single environment for each tier of your application, such as dev , staging and production . That recommendation still holds when using Terraform with Layer0. Using Layer0 CLI, you can target a specific environment for most CLI commands. This enables you to service each tier relatively easily. In Terraform, there a few approaches you can take to enable a similar workflow.", - "title": "Part 7: State Environments" - }, - { - "location": "/guides/terraform_beyond_layer0/#single-terraform-configuration", - "text": "You can use a single Terraform configuration to create and maintain multiple environments by making use of the Count parameter, inside a Resource. Count enables you to create multiple copies of a given resource. 
For example variable environments {\n type = list \n\n default = [\n dev ,\n staging \n production \n ]\n}\n\nresource layer0_environment demo {\n count = ${length(var.environments)} \n\n name = ${var.environments[count.index]}_demo \n} Let's have a more in-depth look in how this works. You can start by navigating to `./terraform-beyond-layer0/example-2' folder. Start by running the plan command. terraform plan Outputs: + module.environment.aws_dynamodb_table.guestbook.0\n ...\n name: dev_guestbook \n...\n+ module.environment.aws_dynamodb_table.guestbook.1\n ..\n name: staging_guestbook \n... Note that you will see a copy of each resource for each environment specified in your environments file in ./example-2/variables.tf . Go ahead and run apply. terraform apply Outputs: Apply complete! Resources: 10 added, 0 changed, 0 destroyed.\n\nOutputs:\n\nguestbook_urls = dev_url staging_url You have now created two separate environments using a single terraform configuration: dev staging. You can navigate to both the urls output and you should note that they are separate instances of the guestbook application backed with their own separate data store. A common use case for maintaining different environments is to configure each environment slightly differently. For example, you might want to scale your Layer0 service to 3 for staging and leave it as 1 for the dev environment. This can be done easily by using conditional logic to set our scale parameter in the layer0 service configuration in ./example-2/main.tf . Go ahead and open main.tf in a text editor. Navigate to the layer0_service guestbook section. Uncomment the scale parameter so that your configuration looks like below. resource layer0_service guestbook {\n count = ${length(var.environments)} \n\n name = ${element(layer0_environment.demo.*.name, count.index)}_guestbook_svc \n environment = ${element(layer0_environment.demo.*.id, count.index)} \n deploy = ${element(layer0_deploy.guestbook.*.id, count.index)} \n load_balancer = ${element(layer0_load_balancer.guestbook.*.id, count.index)} \n scale = scale = ${lookup(var.service_scale, var.environments[count.index]), 1 )} \n} The variable service_scale is already defined in variables.tf . If you now go ahead and run plan, you will see that the guestbook service for only the staging environment will be scaled up. terraform plan Outputs: ~ layer0_service.guestbook.1\n scale: 1 = 3 A potential downside of this approach however is that all your environments are using the same state file. Sharing a state file breaks some of the resource encapsulation between environments. Should there ever be a situation where your state file becomes corrupt, it would affect your ability to service all the environments till you resolve the issue by potentially rolling back to a previous copy of the state file. The next section will show you how you can separate your Terraform environment configuration such that each environment will have its own state file. Note As previously mentioned, you will want to avoid hardcoding resource parameter configuration values as much as possible. As an example the scale property of a layer0 service. But this extends to other properties as well like docker image version etc. 
", - "title": "Single Terraform Configuration" - }, - { - "location": "/guides/terraform_beyond_layer0/#multiple-terraform-configurations", - "text": "The previous example used a single set of Terraform configuration files to create and maintain multiple environments. This resulted in a single state file which held the state information for all the environments. To avoid all environments sharing a single state file, you can split your Terraform configuration so that you have a state file for each environment. Go ahead and navigate to the ./terraform-beyond-layer0/example-3 folder. Here we are using a folder to separate each environment, so env-dev and env-staging represent the dev and staging environments. To work with either of the environments, you will need to navigate into the desired environment's folder and run Terraform commands. This will ensure that each environment has its own state file. Open the env-dev folder inside a text editor. Note that main.tf doesn't contain any resource definitions. Instead, we only have one module definition which has various variables being passed in, which is also how we are passing in the environment variable. To create the dev and staging environments for our guestbook application, go ahead and run the terraform plan and apply commands from the env-dev and env-staging folders. # assuming you are in the terraform-beyond-layer0/example-3 folder\ncd env-dev\nterraform get\nterraform plan\nterraform apply\n\ncd ../env-staging\nterraform get\nterraform plan\nterraform apply You should now have two instances of the guestbook application running. Note that the guestbook service in our staging environment has been scaled to 3. We have done this by specifying a map variable service_scale in ./example-3/dev-staging/variables.tf which can have different scale values for each environment.", - "title": "Multiple Terraform Configurations" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-8-multiple-provider-instances", - "text": "You can define multiple instances of the same provider, each uniquely customized. For example, you can have an aws provider to support multiple regions or different roles, or, in the case of the layer0 provider, to support multiple layer0 endpoints. For example: # aws provider\nprovider aws {\n alias = east \n region = us-east-1 \n # ...\n}\n\n# aws provider configured to a west region\nprovider aws {\n alias = west \n region = us-west-1 \n # ...\n} This will now allow you to reference aws providers configured for different regions. You can do so by referencing the provider using the naming scheme TYPE.ALIAS , which in the above example results in aws.west . See Provider Configuration for more information. resource aws_instance foo {\n provider = aws.east \n # ...\n}\n\nresource aws_instance bar {\n provider = aws.west \n # ...\n}", - "title": "Part 8: Multiple Provider Instances" - }, - { - "location": "/guides/terraform_beyond_layer0/#part-9-cleanup", - "text": "When you're finished with the examples in this guide, run the following destroy command in each of the following directories to destroy the Layer0 environment, the application and the DynamoDB table. Directories: /example-1 /example-2 /example-3/env-dev /example-3/env-staging terraform destroy Remote Backend Resources If you created additional resources (an S3 bucket and a DynamoDB table) separately when configuring a Remote Backend , do not forget to delete those if they are no longer needed. 
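For reference, those remote backend resources correspond to a backend block along these lines; the bucket, key, region, and table names below are placeholders rather than values from this guide:

```hcl
terraform {
  backend "s3" {
    bucket         = "my-layer0-terraform-state"   # placeholder bucket name
    key            = "guestbook/terraform.tfstate" # placeholder state path
    region         = "us-west-2"                   # placeholder region
    encrypt        = true                          # server side encryption of the state file
    dynamodb_table = "terraform-state-lock"        # placeholder; table needs a LockID string primary key
  }
}
```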
You should be able to look at your Terraform configuration file layer0.tf to determine the name of the bucket and table.", - "title": "Part 9: Cleanup" - }, - { - "location": "/guides/one_off_task/", - "text": "Deployment guide: Guestbook one-off task\n#\n\n\nIn this example, you will learn how to use layer0 to run a one-off task. A task is used to run a single instance of your Task Definition and is typically a short running job that will be stopped once finished.\n\n\n\n\nBefore you start\n#\n\n\nIn order to complete the procedures in this section, you must install and configure Layer0 v0.8.4 or later. If you have not already configured Layer0, see the \ninstallation guide\n. If you are running an older version of Layer0, see the \nupgrade instructions\n.\n\n\nPart 1: Prepare the task definition\n#\n\n\n\n\nDownload the \nGuestbook One-off Task Definition\n and save it to your computer as \nDockerrun.aws.json\n.\n\n\n\n\nPart 2: Create a deploy\n#\n\n\nNext, you will create a new deploy for the task using the \ndeploy create\n command. At the command prompt, run the following command:\n\n\nl0 deploy create Dockerrun.aws.json one-off-task-dpl\n\n\nYou will see the following output:\n\n\nDEPLOY ID DEPLOY NAME VERSION\none-off-task-dpl.1 one-off-task-dpl 1\n\n\n\n\nPart 3: Create the task\n#\n\n\nAt this point, you can use the \ntask create\n command to run a copy of the task.\n\n\nTo run the task, use the following command:\n\n\nl0 task create demo-env echo-tsk one-off-task-dpl:latest --wait\n\n\nYou will see the following output:\n\n\nTASK ID TASK NAME ENVIRONMENT DEPLOY SCALE\none-off851c9 echo-tsk demo-env one-off-task-dpl:1 0/1 (1)\n\n\n\n\nThe \nSCALE\n column shows the running, desired and pending counts. A value of \n0/1 (1)\n indicates that running = 0, desired = 1 and (1) for 1 pending task that is about to transition to running state. 
After your task has finished running, note that the desired count will remain 1 and pending value will no longer be shown, so the value will be \n0/1\n for a finished task.\n\n\nPart 4: Check the status of the task\n#\n\n\nTo view the logs for this task, and evaluate its progress, you can use the \ntask logs\n command:\n\n\nl0 task logs one-off-task-tsk\n \n\n\nYou will see the following output:\n\n\nalpine\n------\nTask finished!\n\n\n\n\nYou can also use the following command for more information in the task.\n\n\nl0 -o json task get echo-tsk\n\n\nOutputs:\n\n\n[\n {\n \ncopies\n: [\n {\n \ndetails\n: [],\n \nreason\n: \nWaiting for cluster capacity to run\n,\n \ntask_copy_id\n: \n\n }\n ],\n \ndeploy_id\n: \none-off-task-dpl.2\n,\n \ndeploy_name\n: \none-off-task-dpl\n,\n \ndeploy_version\n: \n2\n,\n \ndesired_count\n: 1,\n \nenvironment_id\n: \ndemoenv669e4\n,\n \nenvironment_name\n: \ndemo-env\n,\n \npending_count\n: 1,\n \nrunning_count\n: 0,\n \ntask_id\n: \nechotsk1facd\n,\n \ntask_name\n: \necho-tsk\n\n }\n]\n\n\n\n\nAfter the task has finished, running \nl0 -o json task get echo-tsk\n will show a pending_count of 0.\n\n\nOutputs:\n\n\n...\n\ncopies\n: [\n {\n \ndetails\n: [\n {\n \ncontainer_name\n: \nalpine\n,\n \nexit_code\n: 0,\n \nlast_status\n: \nSTOPPED\n,\n \nreason\n: \n\n }\n ],\n \nreason\n: \nEssential container in task exited\n,\n \ntask_copy_id\n: \narn:aws:ecs:us-west-2:856306994068:task/0e723c3e-9cd1-4914-8393-b59abd40eb89\n\n }\n],\n...\n\npending_count\n: 0,\n\nrunning_count\n: 0,\n...", - "title": "One-off Task" - }, - { - "location": "/guides/one_off_task/#deployment-guide-guestbook-one-off-task", - "text": "In this example, you will learn how to use layer0 to run a one-off task. A task is used to run a single instance of your Task Definition and is typically a short running job that will be stopped once finished.", - "title": "Deployment guide: Guestbook one-off task" - }, - { - "location": "/guides/one_off_task/#before-you-start", - "text": "In order to complete the procedures in this section, you must install and configure Layer0 v0.8.4 or later. If you have not already configured Layer0, see the installation guide . If you are running an older version of Layer0, see the upgrade instructions .", - "title": "Before you start" - }, - { - "location": "/guides/one_off_task/#part-1-prepare-the-task-definition", - "text": "Download the Guestbook One-off Task Definition and save it to your computer as Dockerrun.aws.json .", - "title": "Part 1: Prepare the task definition" - }, - { - "location": "/guides/one_off_task/#part-2-create-a-deploy", - "text": "Next, you will create a new deploy for the task using the deploy create command. At the command prompt, run the following command: l0 deploy create Dockerrun.aws.json one-off-task-dpl You will see the following output: DEPLOY ID DEPLOY NAME VERSION\none-off-task-dpl.1 one-off-task-dpl 1", - "title": "Part 2: Create a deploy" - }, - { - "location": "/guides/one_off_task/#part-3-create-the-task", - "text": "At this point, you can use the task create command to run a copy of the task. To run the task, use the following command: l0 task create demo-env echo-tsk one-off-task-dpl:latest --wait You will see the following output: TASK ID TASK NAME ENVIRONMENT DEPLOY SCALE\none-off851c9 echo-tsk demo-env one-off-task-dpl:1 0/1 (1) The SCALE column shows the running, desired and pending counts. A value of 0/1 (1) indicates that running = 0, desired = 1 and (1) for 1 pending task that is about to transition to running state. 
After your task has finished running, note that the desired count will remain 1 and pending value will no longer be shown, so the value will be 0/1 for a finished task.", - "title": "Part 3: Create the task" - }, - { - "location": "/guides/one_off_task/#part-4-check-the-status-of-the-task", - "text": "To view the logs for this task, and evaluate its progress, you can use the task logs command: l0 task logs one-off-task-tsk You will see the following output: alpine\n------\nTask finished! You can also use the following command for more information in the task. l0 -o json task get echo-tsk Outputs: [\n {\n copies : [\n {\n details : [],\n reason : Waiting for cluster capacity to run ,\n task_copy_id : \n }\n ],\n deploy_id : one-off-task-dpl.2 ,\n deploy_name : one-off-task-dpl ,\n deploy_version : 2 ,\n desired_count : 1,\n environment_id : demoenv669e4 ,\n environment_name : demo-env ,\n pending_count : 1,\n running_count : 0,\n task_id : echotsk1facd ,\n task_name : echo-tsk \n }\n] After the task has finished, running l0 -o json task get echo-tsk will show a pending_count of 0. Outputs: ... copies : [\n {\n details : [\n {\n container_name : alpine ,\n exit_code : 0,\n last_status : STOPPED ,\n reason : \n }\n ],\n reason : Essential container in task exited ,\n task_copy_id : arn:aws:ecs:us-west-2:856306994068:task/0e723c3e-9cd1-4914-8393-b59abd40eb89 \n }\n],\n... pending_count : 0, running_count : 0,\n...", - "title": "Part 4: Check the status of the task" - }, - { - "location": "/reference/cli/", - "text": "Layer0 CLI Reference\n#\n\n\nGlobal options\n#\n\n\nThe \nl0\n application is designed to be used with one of several subcommands: \nadmin\n, \ndeploy\n, \nenvironment\n, \njob\n, \nloadbalancer\n, \nservice\n, and \ntask\n. These subcommands are detailed in the sections below. There are, however, some global parameters that you may specify when using \nl0\n.\n\n\nUsage\n#\n\n\n\n \n\n \nl0\n [\nglobalOptions\n] \ncommand\n \nsubcommand\n [\noptions\n] [\nparameters\n]\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--output {text|json}\n\n \nSpecify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the \n--output json\n option, you can force \nl0\n to output JSON-formatted text.\n\n \n\n \n\n \n--version\n\n \nDisplay the version number of the \nl0\n application.\n\n \n\n\n\n\n\n\nAdmin\n#\n\n\nThe \nadmin\n command is used to manage the Layer0 API server. This command is used with the following subcommands: \ndebug\n, \nsql\n, and \nversion\n.\n\n\nadmin debug\n#\n\n\nUse the \ndebug\n subcommand to view the running version of your Layer0 API server and CLI.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 admin debug\n\n \n\n\n\n\nadmin sql\n#\n\n\nUse the \nsql\n subcommand to initialize the Layer0 API database.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 admin sql\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe \nsql\n subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.\n\n \n\n\n\n\nadmin version\n#\n\n\nUse the \nversion\n subcommand to display the current version of the Layer0 API.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 admin version\n\n \n\n\n\n\n\n\nDeploy\n#\n\n\ndeploy create\n#\n\n\nUse the \ncreate\n subcommand to upload a Docker task definition into Layer0. 
This command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n and \nlist\n.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 deploy create\n \ndockerPath\n \ndeployName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \ndockerPath\n\n \nThe path to the Docker task definition that you want to upload.\n\n \n\n \n\n \ndeployName\n\n \nA name for the deploy.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nIf \ndeployName\n exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version.\n\n \n \n\n \n\n \nIf you use Visual Studio to modify or create your Dockerrun file, you may see an \"Invalid Dockerrun.aws.json\" error. This error is caused by the default encoding used by Visual Studio. See the \n\"Common issues\" page\n for steps to resolve this issue.\n\n \n \n\n \n\n \n\nDeploys created through Layer0 are rendered with a \nlogConfiguration\n section for each container.\nIf a \nlogConfiguration\n section already exists, no changes are made to the section.\nThe additional section enables logs from each container to be sent to the the Layer0 log group.\nThis is where logs are looked up during \nl0 \nentity\n logs\n commands.\nThe added \nlogConfiguration\n section uses the following template:\n\n\nlogConfiguration\n: {\n \nlogDriver\n: \nawslogs\n,\n \noptions\n: {\n \nawslogs-group\n: \nl0-\nprefix\n,\n \nawslogs-region\n: \nregion\n,\n \nawslogs-stream-prefix\n: \nl0\n\n }\n }\n}\n\n\n\n\n\n \n\n\n\n\ndeploy delete\n#\n\n\nUse the \ndelete\n subcommand to delete a version of a Layer0 deploy.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 deploy delete\n \ndeployID\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \ndeployID\n\n \nThe unique identifier of the version of the deploy that you want to delete. You can obtain a list of deployIDs for a given deploy by executing the following command: \nl0 deploy get\n \ndeployName\n\n \n\n\n\n\ndeploy get\n#\n\n\nUse the \nget\n subcommand to view information about an existing Layer0 deploy.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 deploy get\n \ndeployName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \ndeployName\n\n \nThe name of the Layer0 deploy for which you want to view additional information.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe \nget\n subcommand supports wildcard matching: \nl0 deploy get dep*\n would return all deploys beginning with \ndep\n.\n\n \n\n\n\n\ndeploy list\n#\n\n\nUse the \nlist\n subcommand to view a list of deploys in your instance of Layer0.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 deploy list\n\n \n\n\n\n\n\n\nEnvironment\n#\n\n\nLayer0 environments allow you to isolate services and load balancers for specific applications.\nThe \nenvironment\n command is used to manage Layer0 environments. 
This command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, \nlist\n, and \nsetmincount\n.\n\n\nenvironment create\n#\n\n\nUse the \ncreate\n subcommand to create an additional Layer0 environment (\nenvironmentName\n).\n\n\nUsage\n#\n\n\n\n \n\n \nl0 environment create\n [--size] [--min-count] [--user-data] [--os] [--ami] \nenvironmentName\n \n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nA name for the environment.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--size\n\n \nThe size of the EC2 instances to create in your environment (default: m3.medium).\n\n \n\n \n\n \n--min-count\n\n \nThe minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0).\n\n \n\n \n\n \n--user-data\n\n \nThe user data template to use for the environment's autoscaling group.\n\n \n\n \n\n \n--os\n\n \nThe operating system used in the environment. Options are \"linux\" or \"windows\" (default: linux).\n More information on windows environments is documented below\n\n \n\n \n\n \n--ami\n\n \nA custom AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system.\n\n \n\n\n\n\nThe user data template can be used to add custom configuration to your Layer0 environment.\nLayer0 uses \nGo Templates\n to render user data.\nCurrently, two variables are passed into the template: \nECSEnvironmentID\n and \nS3Bucket\n.\nPlease review the \nECS Tutorial\n\nto better understand how to write a user data template, and use at your own risk!\n\n\nLinux Environments\n: The default Layer0 user data template is:\n\n\n#!/bin/bash\necho ECS_CLUSTER={{ .ECSEnvironmentID }} \n /etc/ecs/ecs.config\necho ECS_ENGINE_AUTH_TYPE=dockercfg \n /etc/ecs/ecs.config\nyum install -y aws-cli awslogs jq\naws s3 cp s3://{{ .S3Bucket }}/bootstrap/dockercfg dockercfg\ncfg=$(cat dockercfg)\necho ECS_ENGINE_AUTH_DATA=$cfg \n /etc/ecs/ecs.config\ndocker pull amazon/amazon-ecs-agent:latest\nstart ecs\n\n\n\n\nWindows Environments\n: The default Layer0 user data template is:\n\n\npowershell\n\n# Set agent env variables for the Machine context (durable)\n$clusterName = \n{{ .ECSEnvironmentID }}\n\nWrite-Host Cluster name set as: $clusterName -foreground green\n\n[Environment]::SetEnvironmentVariable(\nECS_CLUSTER\n, $clusterName, \nMachine\n)\n[Environment]::SetEnvironmentVariable(\nECS_ENABLE_TASK_IAM_ROLE\n, \nfalse\n, \nMachine\n)\n$agentVersion = 'v1.14.0-1.windows.1'\n$agentZipUri = \nhttps://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip\n\n$agentZipMD5Uri = \n$agentZipUri.md5\n\n\n# Configure docker auth\nRead-S3Object -BucketName {{ .S3Bucket }} -Key bootstrap/dockercfg -File dockercfg.json\n$dockercfgContent = [IO.File]::ReadAllText(\ndockercfg.json\n)\n[Environment]::SetEnvironmentVariable(\nECS_ENGINE_AUTH_DATA\n, $dockercfgContent, \nMachine\n)\n[Environment]::SetEnvironmentVariable(\nECS_ENGINE_AUTH_TYPE\n, \ndockercfg\n, \nMachine\n)\n\n### --- Nothing user configurable after this point ---\n$ecsExeDir = \n$env:ProgramFiles\\Amazon\\ECS\n\n$zipFile = \n$env:TEMP\\ecs-agent.zip\n\n$md5File = \n$env:TEMP\\ecs-agent.zip.md5\n\n\n### Get the files from S3\nInvoke-RestMethod -OutFile $zipFile -Uri $agentZipUri\nInvoke-RestMethod -OutFile $md5File -Uri $agentZipMD5Uri\n\n## MD5 Checksum\n$expectedMD5 = (Get-Content $md5File)\n$md5 = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider\n$actualMD5 = 
[System.BitConverter]::ToString($md5.ComputeHash([System.IO.File]::ReadAllBytes($zipFile))).replace('-', '')\nif($expectedMD5 -ne $actualMD5) {\n echo \nDownload doesn't match hash.\n\n echo \nExpected: $expectedMD5 - Got: $actualMD5\n\n exit 1\n}\n\n## Put the executables in the executable directory.\nExpand-Archive -Path $zipFile -DestinationPath $ecsExeDir -Force\n\n## Start the agent script in the background.\n$jobname = \nECS-Agent-Init\n\n$script = \ncd '$ecsExeDir'; .\\amazon-ecs-agent.ps1\n\n$repeat = (New-TimeSpan -Minutes 1)\n$jobpath = $env:LOCALAPPDATA + \n\\Microsoft\\Windows\\PowerShell\\ScheduledJobs\\$jobname\\ScheduledJobDefinition.xml\n\n\nif($(Test-Path -Path $jobpath)) {\n echo \nJob definition already present\n\n exit 0\n}\n\n$scriptblock = [scriptblock]::Create(\n$script\n)\n$trigger = New-JobTrigger -At (Get-Date).Date -RepeatIndefinitely -RepetitionInterval $repeat -Once\n$options = New-ScheduledJobOption -RunElevated -ContinueIfGoingOnBattery -StartIfOnBattery\nRegister-ScheduledJob -Name $jobname -ScriptBlock $scriptblock -Trigger $trigger -ScheduledJobOption $options -RunNow\nAdd-JobTrigger -Name $jobname -Trigger (New-JobTrigger -AtStartup -RandomDelay 00:1:00)\n\n/powershell\n\n\npersist\ntrue\n/persist\n\n\n\n\n\n\nWindows Environments\nWindows containers are still in beta. \n\n\n\n\n\n\nYou can view the documented caveats with ECS \nhere\n.\nWhen creating Windows environments in Layer0, the root volume sizes for instances are 200GiB to accommodate the large size of the containers.\n\nIt can take as long as 45 minutes for a new windows container to come online. \n\n\nenvironment delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing Layer0 environment.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 environment delete\n [--wait] \nenvironmentName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the Layer0 environment that you want to delete.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--wait\n\n \nWait until the deletion is complete before exiting.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThis operation performs several tasks asynchronously. 
When run without the \n--wait\n option, this operation will most likely exit before all of these tasks are complete; when run with the \n--wait\n option, this operation will only exit once these tasks have completed.\n\n \n\n\n\n\nenvironment get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 environment.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 environment get\n \nenvironmentName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the Layer0 environment for which you want to view additional information.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe \nget\n subcommand supports wildcard matching: \nl0 environment get test*\n would return all environments beginning with \ntest\n.\n\n \n\n\n\n\nenvironment list\n#\n\n\nUse the \nlist\n subcommand to display a list of environments in your instance of Layer0.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 environment list\n\n \n\n\n\n\nenvironment setmincount\n#\n\n\nUse the \nsetmincount\n subcommand to set the minimum number of EC2 instances allowed the environment's autoscaling group.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 enviroment setmincount\n \nenvironmentName\n \ncount\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the Layer0 environment that you want to delete.\n\n \n\n \n\n \ncount\n\n \nThe minimum number of instances allowed in the environment's autoscaling group.\n\n \n\n\n\n\nenvironment link\n#\n\n\nUse the \nlink\n subcommand to link two environments together. \nWhen environments are linked, services inside the environments are allowed to communicate with each other as if they were in the same environment. \nThis link is bidirectional. \nThis command is idempotent; it will succeed even if the two specified environments are already linked.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 environment link\n \nsourceEnvironmentName\n \ndestEnvironmentName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nsourceEnvironmentName\n\n \nThe name of the first environment to link.\n\n \n\n \n\n \ndestEnvironmentName\n\n \nThe name of the second environment to link. \n\n \n\n\n\n\nenvironment unlink\n#\n\n\nUse the \nunlink\n subcommand to remove the link between two environments.\nThis command is idempotent; it will succeed even if the link does not exist.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 environment unlink\n \nsourceEnvironmentName\n \ndestEnvironmentName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nsourceEnvironmentName\n\n \nThe name of the first environment to unlink.\n\n \n\n \n\n \ndestEnvironmentName\n\n \nThe name of the second environment to unlink. 
\n\n \n\n\n\n\n\n\nJob\n#\n\n\nA Job is a long-running unit of work performed on behalf of the Layer0 API.\nJobs are executed as Layer0 tasks that run in the \napi\n Environment.\nThe \njob\n command is used with the following subcommands: \nlogs\n, \ndelete\n, \nget\n, and \nlist\n.\n\n\njob logs\n#\n\n\nUse the \nlogs\n subcommand to display the logs from a Layer0 job that is currently running.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 job logs\n [--start \nMM/DD HH:MM\n] [--end \nMM/DD HH:MM\n] [--tail=\nN\n ] \njobName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \njobName\n\n \nThe name of the Layer0 job for which you want to view logs.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--start \nMM/DD HH:MM\n\n \nThe start of the time range to fetch logs.\n\n \n\n \n\n \n--end \nMM/DD HH:MM\n\n \nThe end of the time range to fetch logs.\n\n \n\n \n\n \n--tail=\nN\n\n \nDisplay only the last \nN\n lines of the log.\n\n \n\n\n\n\njob delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing job.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 job delete\n \njobName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \njobName\n\n \nThe name of the job that you want to delete.\n\n \n\n\n\n\njob get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 job.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 job get\n \njobName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \njobName\n\n \nThe name of an existing Layer0 job.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe \nget\n subcommand supports wildcard matching: \nl0 job get 2a55*\n would return all jobs beginning with \n2a55\n.\n\n \n\n\n\n\njob list\n#\n\n\nUse the \nlist\n subcommand to display information about all of the existing jobs in an instance of Layer0.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 job list\n\n \n\n\n\n\n\n\nLoadbalancer\n#\n\n\nA load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 \nservices\n. The \nloadbalancer\n command is used with the following subcommands: \ncreate\n, \ndelete\n, \naddport\n, \ndropport\n, \nget\n, \nlist\n, and \nhealthcheck\n.\n\n\nloadbalancer create\n#\n\n\nUse the \ncreate\n subcommand to create a new load balancer.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer create\n [--port \nport\n --port \nport\n ...] [--certificate \ncertificateName\n] [--private] [healthcheck-flags]\nenvironmentName loadBalancerName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the existing Layer0 environment in which you want to create the load balancer.\n\n \n\n \n\n \nloadBalancerName\n\n \nA name for the load balancer.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n\n --port \nhostPort:containerPort/protocol\n\n \n\n \n\n \nThe port configuration for the load balancer. \nhostPort\n is the port on which the load balancer will listen for traffic; \ncontainerPort\n is the port that traffic will be forwarded to. You can specify multiple ports using \n--port xxx --port yyy\n. If this option is not specified, Layer0 will use the following configuration: 80:80/tcp\n\n \n\n \n\n \n\n \n\n --certificate \ncertificateName\n\n \n\n \n\n \nThe name of an existing Layer0 certificate. 
You must include this option if you are using an HTTPS port configuration.\n\n \n\n \n\n \n\n \n\n --private\n \n\n \n\n \nWhen you use this option, the load balancer will only be accessible from within the Layer0 environment.\n\n \n\n \n\n \n\n \n\n --healthcheck-target \ntarget\n\n \n\n \n\n \nThe target of the check. Valid pattern is \nPROTOCOL:PORT/PATH\n \n(default: \n\"TCP:80\"\n)\n\n \n\n If PROTOCOL is \nHTTP\n or \nHTTPS\n, both PORT and PATH are required\n \n\n - \nexample: \nHTTP:80/admin/healthcheck\n\n \n\n If PROTOCOL is \nTCP\n or \nSSL\n, PORT is required and PATH is not supported\n \n\n - \nexample: \nTCP:80\n\n \n\n \n\n \n\n \n\n --healthcheck-interval \ninterval\n\n \n\n \n\n \nThe interval between checks \n(default: \n30\n)\n.\n\n \n\n \n\n \n\n \n\n --healthcheck-timeout \ntimeout\n\n \n\n \n\n \nThe length of time before the check times out \n(default: \n5\n)\n.\n\n \n\n \n\n \n\n \n\n --healthcheck-healthy-threshold \nhealthyThreshold\n\n \n\n \n\n \nThe number of checks before the instance is declared healthy \n(default: \n2\n)\n.\n\n \n\n \n\n \n\n \n\n --healthcheck-unhealthy-threshold \nunhealthyThreshold\n\n \n\n \n\n \nThe number of checks before the instance is declared unhealthy \n(default: \n2\n)\n.\n\n \n\n \n\n\n\n\n\n\nPorts and Health Checks\n\n\nWhen both the \n--port\n and the \n--healthcheck-target\n options are omitted, Layer0 configures the load balancer with some default values: \n80:80/tcp\n for ports and \ntcp:80\n for healthcheck target.\nThese default values together create a load balancer configured with a simple but functioning health check, opening up a set of ports that allows traffic to the target of the healthcheck.\n(\n--healthcheck-target tcp:80\n tells the load balancer to ping its services at port 80 to determine their status, and \n--port 80:80/tcp\n configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services)\n\n\nWhen creating a load balancer with non-default configurations for either \n--port\n or \n--healthcheck-target\n, make sure that a valid \n--port\n and \n--healthcheck-target\n pairing is also created.\n\n\n\n\nloadbalancer delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing load balancer.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer delete\n [--wait] \nloadBalancerName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nloadBalancerName\n\n \nThe name of the load balancer that you want to delete.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--wait\n\n \nWait until the deletion is complete before exiting.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nIn order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer.\n\n \n\n \n\n \nThis operation performs several tasks asynchronously. 
When run without the \n--wait\n option, this operation will most likely exit before all of these tasks are complete; when run with the \n--wait\n option, this operation will only exit once these tasks have completed.\n\n \n\n\n\n\nloadbalancer addport\n#\n\n\nUse the \naddport\n subcommand to add a new port configuration to an existing Layer0 load balancer.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer addport\n \nloadBalancerName hostPort:containerPort/protocol\n [--certificate \ncertificateName\n]\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nloadBalancerName\n\n \nThe name of an existing Layer0 load balancer in which you want to add the port configuration.\n\n \n\n \n\n \nhostPort\n\n \nThe port that the load balancer will listen on.\n\n \n\n \n\n \ncontainerPort\n\n \nThe port that the load balancer will forward traffic to.\n\n \n\n \n\n \nprotocol\n\n \nThe protocol to use when forwarding traffic (acceptable values: tcp, ssl, http, and https).\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--certificate \ncertificateName\n\n \nThe name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe port configuration you specify must not already be in use by the load balancer you specify.\n\n \n\n\n\n\nloadbalancer dropport\n#\n\n\nUse the \ndropport\n subcommand to remove a port configuration from an existing Layer0 load balancer.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer dropport\n \nloadBalancerName\n \nhostPort\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nloadBalancerName\n\n \nThe name of an existing Layer0 load balancer in which you want to remove the port configuration.\n\n \n\n \n\n \nhostPort\n\n \nThe host port to remove from the load balancer.\n\n \n\n\n\n\nloadbalancer get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 load balancer.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer get\n \nenvironmentName:loadBalancerName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of an existing Layer0 environment.\n\n \n\n \n\n \nloadBalancerName\n\n \nThe name of an existing Layer0 load balancer.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe \nget\n subcommand supports wildcard matching: \nl0 loadbalancer get entrypoint*\n would return all jobs beginning with \nentrypoint\n.\n\n \n\n\n\n\nloadbalancer list\n#\n\n\nUse the \nlist\n subcommand to display information about all of the existing load balancers in an instance of Layer0.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer list\n\n \n\n\n\n\nloadbalancer healthcheck\n#\n\n\nUse the \nhealthcheck\n subcommand to display information about or update the configuration of a load balancer's health check.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 loadbalancer healthcheck\n [healthcheck-flags] \nloadbalancerName\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n\n --set-target \ntarget\n\n \n\n \n\n \nThe target of the check. 
Valid pattern is \nPROTOCOL:PORT/PATH\n, where PROTOCOL values are:\n \n\n \nHTTP\n or \nHTTPS\n: both PORT and PATH are required\n \n\n - \nexample: \nHTTP:80/admin/healthcheck\n\n \n\n \nTCP\n or \nSSL\n: PORT is required, PATH is not supported\n \n\n - \nexample: \nTCP:80\n\n \n\n \n\n \n\n \n\n --set-interval \ninterval\n\n \n\n \n\n \nThe interval between checks.\n\n \n\n \n\n \n\n \n\n --set-timeout \ntimeout\n\n \n\n \n\n \nThe length of time before the check times out.\n\n \n\n \n\n \n\n \n\n --set-healthy-threshold \nhealthyThreshold\n\n \n\n \n\n \nThe number of checks before the instance is declared healthy.\n\n \n\n \n\n \n\n \n\n --set-unhealthy-threshold \nunhealthyThreshold\n\n \n\n \n\n \nThe number of checks before the instance is declared unhealthy.\n\n \n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nCalling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.\n\n \n\n\n\n\n\n\n\nService\n#\n\n\nA service is a component of a Layer0 environment. The purpose of a service is to execute a Docker image specified in a \ndeploy\n. In order to create a service, you must first create an \nenvironment\n and a \ndeploy\n; in most cases, you should also create a \nload balancer\n before creating the service.\n\n\nThe \nservice\n command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, \nupdate\n, \nlist\n, \nlogs\n, and \nscale\n.\n\n\nservice create\n#\n\n\nUse the \ncreate\n subcommand to create a Layer0 service.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service create\n [--loadbalancer \nenvironmentName:loadBalancerName\n ] [--no-logs] \nenvironmentName serviceName deployName:deployVersion\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nserviceName\n\n \nA name for the service that you are creating.\n\n \n\n \n\n \nenvironmentName\n\n \nThe name of an existing Layer0 environment.\n\n \n\n \n\n \ndeployName\n\n \nThe name of a Layer0 deploy that exists in the environment \nenvironmentName\n.\n\n \n\n \n\n \ndeployVersion\n\n \nThe version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--loadbalancer \nenvironmentName:loadBalancerName\n\n \nPlace the new service behind an existing load balancer named \nloadBalancerName\n in the environment named \nenvironmentName\n.\n\n \n\n \n\n \n--no-logs\n\n \nDisable cloudwatch logging for the service\n\n \n\n\n\n\nservice update\n#\n\n\nUse the \nupdate\n subcommand to apply an existing Layer0 Deploy to an existing Layer0 service.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service update\n [--no-logs] \nenvironmentName:serviceName deployName:deployVersion\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the Layer0 environment in which the service resides.\n\n \n\n \n\n \nserviceName\n\n \nThe name of an existing Layer0 service into which you want to apply the deploy.\n\n \n\n \n\n \ndeployName\n\n \nThe name of the Layer0 deploy that you want to apply to the service.\n\n \n\n \n\n \ndeployVersion\n\n \nThe version of the Layer0 deploy that you want to apply to the service. 
If you do not specify a version number, the latest version of the deploy will be applied.\n\n \n\n \n\n \n--no-logs\n\n \nDisable cloudwatch logging for the service\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nIf your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead.\n\n \n\n\n\n\n\nservice delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing Layer0 service.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service delete\n [--wait] \nenvironmentName:serviceName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the Layer0 environment that contains the service you want to delete.\n\n \n\n \n\n \nserviceName\n\n \nThe name of the Layer0 service that you want to delete.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--wait\n\n \nWait until the deletion is complete before exiting.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThis operation performs several tasks asynchronously. When run without the \n--wait\n option, this operation will most likely exit before all of these tasks are complete; when run with the \n--wait\n option, this operation will only exit once these tasks have completed.\n\n \n\n\n\n\nservice get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 service.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service get\n \nenvironmentName:serviceName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of an existing Layer0 environment.\n\n \n\n \n\n \nserviceName\n\n \nThe name of an existing Layer0 service.\n\n \n\n\n\n\nservice list\n#\n\n\nUse the \nlist\n subcommand to list all of the existing services in your Layer0 instance.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service list\n\n \n\n\n\n\nservice logs\n#\n\n\nUse the \nlogs\n subcommand to display the logs from a Layer0 service that is currently running.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service logs\n [--start \nMM/DD HH:MM\n] [--end \nMM/DD HH:MM\n] [--tail=\nN\n ] \nserviceName\n \n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nserviceName\n\n \nThe name of the Layer0 service for which you want to view logs.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--start \nMM/DD HH:MM\n\n \nThe start of the time range to fetch logs.\n\n \n\n \n\n \n--end \nMM/DD HH:MM\n\n \nThe end of the time range to fetch logs.\n\n \n\n \n\n \n--tail=\nN\n\n \nDisplay only the last \nN\n lines of the log.\n\n \n\n\n\n\nservice scale\n#\n\n\nUse the \nscale\n subcommand to specify how many copies of an existing Layer0 service should run.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 service scale\n \nenvironmentName:serviceName N\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the Layer0 environment that contains the service that you want to scale.\n\n \n\n \n\n \nserviceName\n\n \nThe name of the Layer0 service that you want to scale up.\n\n \n\n \n\n \nN\n\n \nThe number of copies of the specified service that should be run.\n\n \n\n\n\n\n\n\nTask\n#\n\n\nA Layer0 task is a component of an environment. A task executes the contents of a Docker image, as specified in a deploy. A task differs from a service in that a task does not restart after exiting. 
Additionally, ports are not exposed when using a task.\n\n\nThe \ntask\n command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, \nlist\n, and \nlogs\n.\n\n\ntask create\n#\n\n\nUse the \ncreate\n subcommand to create a Layer0 task.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 task create\n [--no-logs] [--copies \ncopies\n] \nenvironmentName taskName deployName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \nenvironmentName\n\n \nThe name of the existing Layer0 environment in which you want to create the task.\n\n \n\n \n\n \ntaskName\n\n \nA name for the task.\n\n \n\n \n\n \ndeployName\n\n \nThe name of an existing Layer0 deploy that the task should use.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--copies\n\n \nThe number of copies of the task to run (default: 1)\n\n \n\n \n\n \n--no-logs\n\n \nDisable cloudwatch logging for the service\n\n \n\n\n\n\ntask delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing Layer0 task.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 task delete\n [\nenvironmentName\n:]\ntaskName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \ntaskName\n\n \nThe name of the Layer0 task that you want to delete.\n\n \n\n\n\n\nOptional parameters\n#\n\n\n\n \n\n \n[\nenvironmentName\n:]\n\n \nThe name of the Layer0 environment that contains the task. This parameter is only necessary if multiple environments contain tasks with exactly the same name.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nUntil the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.\n\n \n\n\n\n\ntask get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 task (\ntaskName\n).\n\n\nUsage\n#\n\n\n\n \n\n \nl0 task get\n [\nenvironmentName\n:]\ntaskName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \ntaskName\n\n \nThe name of a Layer0 task for which you want to see information.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe value of \ntaskName\n does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in \ntaskName\n, then information about all matching tasks will be returned.\n\n \n\n\n\n\ntask list\n#\n\n\nUse the \ntask\n subcommand to display a list of running tasks in your Layer0.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 task list\n\n \n\n\n\n\ntask logs\n#\n\n\nUse the \nlogs\n subcommand to display logs for a running Layer0 task.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 task logs\n [--start \nMM/DD HH:MM\n] [--end \nMM/DD HH:MM\n] [--tail=\nN\n ] \ntaskName\n\n \n\n\n\n\nRequired parameters\n#\n\n\n\n \n\n \ntaskName\n\n \nThe name of an existing Layer0 task.\n\n \n\n\n\n\nOptional arguments\n#\n\n\n\n \n\n \n--start \nMM/DD HH:MM\n\n \nThe start of the time range to fetch logs.\n\n \n\n \n\n \n--end \nMM/DD HH:MM\n\n \nThe end of the time range to fetch logs.\n\n \n\n \n\n \n--tail=\nN\n\n \nDisplay only the last \nN\n lines of the log.\n\n \n\n\n\n\nAdditional information\n#\n\n\n\n \n\n \nThe value of \ntaskName\n does not need to exactly match the name of an existing task. 
If multiple results are found that match the pattern you specified in \ntaskName\n, then information about all matching tasks will be returned.\n\n \n\n\n\n\ntask list\n#\n\n\nUse the \ntask\n subcommand to display a list of running tasks in your Layer0.\n\n\nUsage\n#\n\n\n\n \n\n \nl0 task list\n\n \n\n\n\n=======", - "title": "Layer0 CLI" - }, - { - "location": "/reference/cli/#layer0-cli-reference", - "text": "", - "title": "Layer0 CLI Reference" - }, - { - "location": "/reference/cli/#global-options", - "text": "The l0 application is designed to be used with one of several subcommands: admin , deploy , environment , job , loadbalancer , service , and task . These subcommands are detailed in the sections below. There are, however, some global parameters that you may specify when using l0 .", - "title": "Global options" - }, - { - "location": "/reference/cli/#usage", - "text": "l0 [ globalOptions ] command subcommand [ options ] [ parameters ]", - "title": "Usage" - }, - { - "location": "/reference/cli/#optional-arguments", - "text": "--output {text|json} \n Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the --output json option, you can force l0 to output JSON-formatted text. \n \n \n --version \n Display the version number of the l0 application.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#admin", - "text": "The admin command is used to manage the Layer0 API server. This command is used with the following subcommands: debug , sql , and version .", - "title": "Admin" - }, - { - "location": "/reference/cli/#admin-debug", - "text": "Use the debug subcommand to view the running version of your Layer0 API server and CLI.", - "title": "admin debug" - }, - { - "location": "/reference/cli/#usage_1", - "text": "l0 admin debug", - "title": "Usage" - }, - { - "location": "/reference/cli/#admin-sql", - "text": "Use the sql subcommand to initialize the Layer0 API database.", - "title": "admin sql" - }, - { - "location": "/reference/cli/#usage_2", - "text": "l0 admin sql", - "title": "Usage" - }, - { - "location": "/reference/cli/#additional-information", - "text": "The sql subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#admin-version", - "text": "Use the version subcommand to display the current version of the Layer0 API.", - "title": "admin version" - }, - { - "location": "/reference/cli/#usage_3", - "text": "l0 admin version", - "title": "Usage" - }, - { - "location": "/reference/cli/#deploy", - "text": "", - "title": "Deploy" - }, - { - "location": "/reference/cli/#deploy-create", - "text": "Use the create subcommand to upload a Docker task definition into Layer0. This command is used with the following subcommands: create , delete , get and list .", - "title": "deploy create" - }, - { - "location": "/reference/cli/#usage_4", - "text": "l0 deploy create dockerPath deployName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters", - "text": "dockerPath \n The path to the Docker task definition that you want to upload. 
\n \n \n deployName \n A name for the deploy.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_1", - "text": "If deployName exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version. \n \n \n If you use Visual Studio to modify or create your Dockerrun file, you may see an \"Invalid Dockerrun.aws.json\" error. This error is caused by the default encoding used by Visual Studio. See the \"Common issues\" page for steps to resolve this issue. \n \n \n \nDeploys created through Layer0 are rendered with a logConfiguration section for each container.\nIf a logConfiguration section already exists, no changes are made to the section.\nThe additional section enables logs from each container to be sent to the the Layer0 log group.\nThis is where logs are looked up during l0 entity logs commands.\nThe added logConfiguration section uses the following template: logConfiguration : {\n logDriver : awslogs ,\n options : {\n awslogs-group : l0- prefix ,\n awslogs-region : region ,\n awslogs-stream-prefix : l0 \n }\n }\n}", - "title": "Additional information" - }, - { - "location": "/reference/cli/#deploy-delete", - "text": "Use the delete subcommand to delete a version of a Layer0 deploy.", - "title": "deploy delete" - }, - { - "location": "/reference/cli/#usage_5", - "text": "l0 deploy delete deployID", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_1", - "text": "deployID \n The unique identifier of the version of the deploy that you want to delete. You can obtain a list of deployIDs for a given deploy by executing the following command: l0 deploy get deployName", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#deploy-get", - "text": "Use the get subcommand to view information about an existing Layer0 deploy.", - "title": "deploy get" - }, - { - "location": "/reference/cli/#usage_6", - "text": "l0 deploy get deployName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_2", - "text": "deployName \n The name of the Layer0 deploy for which you want to view additional information.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_2", - "text": "The get subcommand supports wildcard matching: l0 deploy get dep* would return all deploys beginning with dep .", - "title": "Additional information" - }, - { - "location": "/reference/cli/#deploy-list", - "text": "Use the list subcommand to view a list of deploys in your instance of Layer0.", - "title": "deploy list" - }, - { - "location": "/reference/cli/#usage_7", - "text": "l0 deploy list", - "title": "Usage" - }, - { - "location": "/reference/cli/#environment", - "text": "Layer0 environments allow you to isolate services and load balancers for specific applications.\nThe environment command is used to manage Layer0 environments. 
This command is used with the following subcommands: create , delete , get , list , and setmincount .", - "title": "Environment" - }, - { - "location": "/reference/cli/#environment-create", - "text": "Use the create subcommand to create an additional Layer0 environment ( environmentName ).", - "title": "environment create" - }, - { - "location": "/reference/cli/#usage_8", - "text": "l0 environment create [--size] [--min-count] [--user-data] [--os] [--ami] environmentName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_3", - "text": "environmentName \n A name for the environment.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_1", - "text": "--size \n The size of the EC2 instances to create in your environment (default: m3.medium). \n \n \n --min-count \n The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0). \n \n \n --user-data \n The user data template to use for the environment's autoscaling group. \n \n \n --os \n The operating system used in the environment. Options are \"linux\" or \"windows\" (default: linux).\n More information on windows environments is documented below \n \n \n --ami \n A custom AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system. \n The user data template can be used to add custom configuration to your Layer0 environment.\nLayer0 uses Go Templates to render user data.\nCurrently, two variables are passed into the template: ECSEnvironmentID and S3Bucket .\nPlease review the ECS Tutorial \nto better understand how to write a user data template, and use at your own risk! Linux Environments : The default Layer0 user data template is: #!/bin/bash\necho ECS_CLUSTER={{ .ECSEnvironmentID }} /etc/ecs/ecs.config\necho ECS_ENGINE_AUTH_TYPE=dockercfg /etc/ecs/ecs.config\nyum install -y aws-cli awslogs jq\naws s3 cp s3://{{ .S3Bucket }}/bootstrap/dockercfg dockercfg\ncfg=$(cat dockercfg)\necho ECS_ENGINE_AUTH_DATA=$cfg /etc/ecs/ecs.config\ndocker pull amazon/amazon-ecs-agent:latest\nstart ecs Windows Environments : The default Layer0 user data template is: powershell \n# Set agent env variables for the Machine context (durable)\n$clusterName = {{ .ECSEnvironmentID }} \nWrite-Host Cluster name set as: $clusterName -foreground green\n\n[Environment]::SetEnvironmentVariable( ECS_CLUSTER , $clusterName, Machine )\n[Environment]::SetEnvironmentVariable( ECS_ENABLE_TASK_IAM_ROLE , false , Machine )\n$agentVersion = 'v1.14.0-1.windows.1'\n$agentZipUri = https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip \n$agentZipMD5Uri = $agentZipUri.md5 \n\n# Configure docker auth\nRead-S3Object -BucketName {{ .S3Bucket }} -Key bootstrap/dockercfg -File dockercfg.json\n$dockercfgContent = [IO.File]::ReadAllText( dockercfg.json )\n[Environment]::SetEnvironmentVariable( ECS_ENGINE_AUTH_DATA , $dockercfgContent, Machine )\n[Environment]::SetEnvironmentVariable( ECS_ENGINE_AUTH_TYPE , dockercfg , Machine )\n\n### --- Nothing user configurable after this point ---\n$ecsExeDir = $env:ProgramFiles\\Amazon\\ECS \n$zipFile = $env:TEMP\\ecs-agent.zip \n$md5File = $env:TEMP\\ecs-agent.zip.md5 \n\n### Get the files from S3\nInvoke-RestMethod -OutFile $zipFile -Uri $agentZipUri\nInvoke-RestMethod -OutFile $md5File -Uri $agentZipMD5Uri\n\n## MD5 Checksum\n$expectedMD5 = (Get-Content $md5File)\n$md5 = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider\n$actualMD5 = 
[System.BitConverter]::ToString($md5.ComputeHash([System.IO.File]::ReadAllBytes($zipFile))).replace('-', '')\nif($expectedMD5 -ne $actualMD5) {\n echo Download doesn't match hash. \n echo Expected: $expectedMD5 - Got: $actualMD5 \n exit 1\n}\n\n## Put the executables in the executable directory.\nExpand-Archive -Path $zipFile -DestinationPath $ecsExeDir -Force\n\n## Start the agent script in the background.\n$jobname = ECS-Agent-Init \n$script = cd '$ecsExeDir'; .\\amazon-ecs-agent.ps1 \n$repeat = (New-TimeSpan -Minutes 1)\n$jobpath = $env:LOCALAPPDATA + \\Microsoft\\Windows\\PowerShell\\ScheduledJobs\\$jobname\\ScheduledJobDefinition.xml \n\nif($(Test-Path -Path $jobpath)) {\n echo Job definition already present \n exit 0\n}\n\n$scriptblock = [scriptblock]::Create( $script )\n$trigger = New-JobTrigger -At (Get-Date).Date -RepeatIndefinitely -RepetitionInterval $repeat -Once\n$options = New-ScheduledJobOption -RunElevated -ContinueIfGoingOnBattery -StartIfOnBattery\nRegister-ScheduledJob -Name $jobname -ScriptBlock $scriptblock -Trigger $trigger -ScheduledJobOption $options -RunNow\nAdd-JobTrigger -Name $jobname -Trigger (New-JobTrigger -AtStartup -RandomDelay 00:1:00) /powershell persist true /persist Windows Environments Windows containers are still in beta. You can view the documented caveats with ECS here .\nWhen creating Windows environments in Layer0, the root volume sizes for instances are 200GiB to accommodate the large size of the containers. \nIt can take as long as 45 minutes for a new windows container to come online.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#environment-delete", - "text": "Use the delete subcommand to delete an existing Layer0 environment.", - "title": "environment delete" - }, - { - "location": "/reference/cli/#usage_9", - "text": "l0 environment delete [--wait] environmentName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_4", - "text": "environmentName \n The name of the Layer0 environment that you want to delete.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_2", - "text": "--wait \n Wait until the deletion is complete before exiting.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#additional-information_3", - "text": "This operation performs several tasks asynchronously. 
When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#environment-get", - "text": "Use the get subcommand to display information about an existing Layer0 environment.", - "title": "environment get" - }, - { - "location": "/reference/cli/#usage_10", - "text": "l0 environment get environmentName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_5", - "text": "environmentName \n The name of the Layer0 environment for which you want to view additional information.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_4", - "text": "The get subcommand supports wildcard matching: l0 environment get test* would return all environments beginning with test .", - "title": "Additional information" - }, - { - "location": "/reference/cli/#environment-list", - "text": "Use the list subcommand to display a list of environments in your instance of Layer0.", - "title": "environment list" - }, - { - "location": "/reference/cli/#usage_11", - "text": "l0 environment list", - "title": "Usage" - }, - { - "location": "/reference/cli/#environment-setmincount", - "text": "Use the setmincount subcommand to set the minimum number of EC2 instances allowed the environment's autoscaling group.", - "title": "environment setmincount" - }, - { - "location": "/reference/cli/#usage_12", - "text": "l0 enviroment setmincount environmentName count", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_6", - "text": "environmentName \n The name of the Layer0 environment that you want to delete. \n \n \n count \n The minimum number of instances allowed in the environment's autoscaling group.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#environment-link", - "text": "Use the link subcommand to link two environments together. \nWhen environments are linked, services inside the environments are allowed to communicate with each other as if they were in the same environment. \nThis link is bidirectional. \nThis command is idempotent; it will succeed even if the two specified environments are already linked.", - "title": "environment link" - }, - { - "location": "/reference/cli/#usage_13", - "text": "l0 environment link sourceEnvironmentName destEnvironmentName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_7", - "text": "sourceEnvironmentName \n The name of the first environment to link. \n \n \n destEnvironmentName \n The name of the second environment to link.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#environment-unlink", - "text": "Use the unlink subcommand to remove the link between two environments.\nThis command is idempotent; it will succeed even if the link does not exist.", - "title": "environment unlink" - }, - { - "location": "/reference/cli/#usage_14", - "text": "l0 environment unlink sourceEnvironmentName destEnvironmentName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_8", - "text": "sourceEnvironmentName \n The name of the first environment to unlink. 
\n \n \n destEnvironmentName \n The name of the second environment to unlink.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#job", - "text": "A Job is a long-running unit of work performed on behalf of the Layer0 API.\nJobs are executed as Layer0 tasks that run in the api Environment.\nThe job command is used with the following subcommands: logs , delete , get , and list .", - "title": "Job" - }, - { - "location": "/reference/cli/#job-logs", - "text": "Use the logs subcommand to display the logs from a Layer0 job that is currently running.", - "title": "job logs" - }, - { - "location": "/reference/cli/#usage_15", - "text": "l0 job logs [--start MM/DD HH:MM ] [--end MM/DD HH:MM ] [--tail= N ] jobName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_9", - "text": "jobName \n The name of the Layer0 job for which you want to view logs.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_3", - "text": "--start MM/DD HH:MM \n The start of the time range to fetch logs. \n \n \n --end MM/DD HH:MM \n The end of the time range to fetch logs. \n \n \n --tail= N \n Display only the last N lines of the log.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#job-delete", - "text": "Use the delete subcommand to delete an existing job.", - "title": "job delete" - }, - { - "location": "/reference/cli/#usage_16", - "text": "l0 job delete jobName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_10", - "text": "jobName \n The name of the job that you want to delete.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#job-get", - "text": "Use the get subcommand to display information about an existing Layer0 job.", - "title": "job get" - }, - { - "location": "/reference/cli/#usage_17", - "text": "l0 job get jobName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_11", - "text": "jobName \n The name of an existing Layer0 job.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_5", - "text": "The get subcommand supports wildcard matching: l0 job get 2a55* would return all jobs beginning with 2a55 .", - "title": "Additional information" - }, - { - "location": "/reference/cli/#job-list", - "text": "Use the list subcommand to display information about all of the existing jobs in an instance of Layer0.", - "title": "job list" - }, - { - "location": "/reference/cli/#usage_18", - "text": "l0 job list", - "title": "Usage" - }, - { - "location": "/reference/cli/#loadbalancer", - "text": "A load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 services . The loadbalancer command is used with the following subcommands: create , delete , addport , dropport , get , list , and healthcheck .", - "title": "Loadbalancer" - }, - { - "location": "/reference/cli/#loadbalancer-create", - "text": "Use the create subcommand to create a new load balancer.", - "title": "loadbalancer create" - }, - { - "location": "/reference/cli/#usage_19", - "text": "l0 loadbalancer create [--port port --port port ...] 
[--certificate certificateName ] [--private] [healthcheck-flags] environmentName loadBalancerName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_12", - "text": "environmentName \n The name of the existing Layer0 environment in which you want to create the load balancer. \n \n \n loadBalancerName \n A name for the load balancer.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_4", - "text": "--port hostPort:containerPort/protocol \n \n \n The port configuration for the load balancer. hostPort is the port on which the load balancer will listen for traffic; containerPort is the port that traffic will be forwarded to. You can specify multiple ports using --port xxx --port yyy . If this option is not specified, Layer0 will use the following configuration: 80:80/tcp \n \n \n \n \n --certificate certificateName \n \n \n The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration. \n \n \n \n \n --private\n \n \n When you use this option, the load balancer will only be accessible from within the Layer0 environment. \n \n \n \n \n --healthcheck-target target \n \n \n The target of the check. Valid pattern is PROTOCOL:PORT/PATH (default: \"TCP:80\" ) \n \n If PROTOCOL is HTTP or HTTPS , both PORT and PATH are required\n \n - example: HTTP:80/admin/healthcheck \n \n If PROTOCOL is TCP or SSL , PORT is required and PATH is not supported\n \n - example: TCP:80 \n \n \n \n \n --healthcheck-interval interval \n \n \n The interval between checks (default: 30 ) . \n \n \n \n \n --healthcheck-timeout timeout \n \n \n The length of time before the check times out (default: 5 ) . \n \n \n \n \n --healthcheck-healthy-threshold healthyThreshold \n \n \n The number of checks before the instance is declared healthy (default: 2 ) . \n \n \n \n \n --healthcheck-unhealthy-threshold unhealthyThreshold \n \n \n The number of checks before the instance is declared unhealthy (default: 2 ) . 
\n \n Ports and Health Checks When both the --port and the --healthcheck-target options are omitted, Layer0 configures the load balancer with some default values: 80:80/tcp for ports and tcp:80 for healthcheck target.\nThese default values together create a load balancer configured with a simple but functioning health check, opening up a set of ports that allows traffic to the target of the healthcheck.\n( --healthcheck-target tcp:80 tells the load balancer to ping its services at port 80 to determine their status, and --port 80:80/tcp configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services) When creating a load balancer with non-default configurations for either --port or --healthcheck-target , make sure that a valid --port and --healthcheck-target pairing is also created.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#loadbalancer-delete", - "text": "Use the delete subcommand to delete an existing load balancer.", - "title": "loadbalancer delete" - }, - { - "location": "/reference/cli/#usage_20", - "text": "l0 loadbalancer delete [--wait] loadBalancerName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_13", - "text": "loadBalancerName \n The name of the load balancer that you want to delete.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_5", - "text": "--wait \n Wait until the deletion is complete before exiting.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#additional-information_6", - "text": "In order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer. \n \n \n This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#loadbalancer-addport", - "text": "Use the addport subcommand to add a new port configuration to an existing Layer0 load balancer.", - "title": "loadbalancer addport" - }, - { - "location": "/reference/cli/#usage_21", - "text": "l0 loadbalancer addport loadBalancerName hostPort:containerPort/protocol [--certificate certificateName ]", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_14", - "text": "loadBalancerName \n The name of an existing Layer0 load balancer in which you want to add the port configuration. \n \n \n hostPort \n The port that the load balancer will listen on. \n \n \n containerPort \n The port that the load balancer will forward traffic to. \n \n \n protocol \n The protocol to use when forwarding traffic (acceptable values: tcp, ssl, http, and https).", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_6", - "text": "--certificate certificateName \n The name of an existing Layer0 certificate. 
You must include this option if you are using an HTTPS port configuration.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#additional-information_7", - "text": "The port configuration you specify must not already be in use by the load balancer you specify.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#loadbalancer-dropport", - "text": "Use the dropport subcommand to remove a port configuration from an existing Layer0 load balancer.", - "title": "loadbalancer dropport" - }, - { - "location": "/reference/cli/#usage_22", - "text": "l0 loadbalancer dropport loadBalancerName hostPort", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_15", - "text": "loadBalancerName \n The name of an existing Layer0 load balancer in which you want to remove the port configuration. \n \n \n hostPort \n The host port to remove from the load balancer.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#loadbalancer-get", - "text": "Use the get subcommand to display information about an existing Layer0 load balancer.", - "title": "loadbalancer get" - }, - { - "location": "/reference/cli/#usage_23", - "text": "l0 loadbalancer get environmentName:loadBalancerName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_16", - "text": "environmentName \n The name of an existing Layer0 environment. \n \n \n loadBalancerName \n The name of an existing Layer0 load balancer.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_8", - "text": "The get subcommand supports wildcard matching: l0 loadbalancer get entrypoint* would return all load balancers beginning with entrypoint .", - "title": "Additional information" - }, - { - "location": "/reference/cli/#loadbalancer-list", - "text": "Use the list subcommand to display information about all of the existing load balancers in an instance of Layer0.", - "title": "loadbalancer list" - }, - { - "location": "/reference/cli/#usage_24", - "text": "l0 loadbalancer list", - "title": "Usage" - }, - { - "location": "/reference/cli/#loadbalancer-healthcheck", - "text": "Use the healthcheck subcommand to display information about or update the configuration of a load balancer's health check.", - "title": "loadbalancer healthcheck" - }, - { - "location": "/reference/cli/#usage_25", - "text": "l0 loadbalancer healthcheck [healthcheck-flags] loadbalancerName", - "title": "Usage" - }, - { - "location": "/reference/cli/#optional-arguments_7", - "text": "--set-target target \n \n \n The target of the check. Valid pattern is PROTOCOL:PORT/PATH , where PROTOCOL values are:\n \n HTTP or HTTPS : both PORT and PATH are required\n \n - example: HTTP:80/admin/healthcheck \n \n TCP or SSL : PORT is required, PATH is not supported\n \n - example: TCP:80 \n \n \n \n \n --set-interval interval \n \n \n The interval between checks. \n \n \n \n \n --set-timeout timeout \n \n \n The length of time before the check times out. \n \n \n \n \n --set-healthy-threshold healthyThreshold \n \n \n The number of checks before the instance is declared healthy. \n \n \n \n \n --set-unhealthy-threshold unhealthyThreshold \n \n \n The number of checks before the instance is declared unhealthy.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#additional-information_9", - "text": "Calling the subcommand without flags will display the current configuration of the load balancer's health check.
Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#service", - "text": "A service is a component of a Layer0 environment. The purpose of a service is to execute a Docker image specified in a deploy . In order to create a service, you must first create an environment and a deploy ; in most cases, you should also create a load balancer before creating the service. The service command is used with the following subcommands: create , delete , get , update , list , logs , and scale .", - "title": "Service" - }, - { - "location": "/reference/cli/#service-create", - "text": "Use the create subcommand to create a Layer0 service.", - "title": "service create" - }, - { - "location": "/reference/cli/#usage_26", - "text": "l0 service create [--loadbalancer environmentName:loadBalancerName ] [--no-logs] environmentName serviceName deployName:deployVersion", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_17", - "text": "serviceName \n A name for the service that you are creating. \n \n \n environmentName \n The name of an existing Layer0 environment. \n \n \n deployName \n The name of a Layer0 deploy that exists in the environment environmentName . \n \n \n deployVersion \n The version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_8", - "text": "--loadbalancer environmentName:loadBalancerName \n Place the new service behind an existing load balancer named loadBalancerName in the environment named environmentName . \n \n \n --no-logs \n Disable cloudwatch logging for the service", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#service-update", - "text": "Use the update subcommand to apply an existing Layer0 Deploy to an existing Layer0 service.", - "title": "service update" - }, - { - "location": "/reference/cli/#usage_27", - "text": "l0 service update [--no-logs] environmentName:serviceName deployName:deployVersion", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_18", - "text": "environmentName \n The name of the Layer0 environment in which the service resides. \n \n \n serviceName \n The name of an existing Layer0 service into which you want to apply the deploy. \n \n \n deployName \n The name of the Layer0 deploy that you want to apply to the service. \n \n \n deployVersion \n The version of the Layer0 deploy that you want to apply to the service. If you do not specify a version number, the latest version of the deploy will be applied. \n \n \n --no-logs \n Disable cloudwatch logging for the service", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_10", - "text": "If your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. 
If you are varying the container name or exposed ports, you must create a new service instead.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#service-delete", - "text": "Use the delete subcommand to delete an existing Layer0 service.", - "title": "service delete" - }, - { - "location": "/reference/cli/#usage_28", - "text": "l0 service delete [--wait] environmentName:serviceName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_19", - "text": "environmentName \n The name of the Layer0 environment that contains the service you want to delete. \n \n \n serviceName \n The name of the Layer0 service that you want to delete.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_9", - "text": "--wait \n Wait until the deletion is complete before exiting.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#additional-information_11", - "text": "This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#service-get", - "text": "Use the get subcommand to display information about an existing Layer0 service.", - "title": "service get" - }, - { - "location": "/reference/cli/#usage_29", - "text": "l0 service get environmentName:serviceName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_20", - "text": "environmentName \n The name of an existing Layer0 environment. \n \n \n serviceName \n The name of an existing Layer0 service.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#service-list", - "text": "Use the list subcommand to list all of the existing services in your Layer0 instance.", - "title": "service list" - }, - { - "location": "/reference/cli/#usage_30", - "text": "l0 service list", - "title": "Usage" - }, - { - "location": "/reference/cli/#service-logs", - "text": "Use the logs subcommand to display the logs from a Layer0 service that is currently running.", - "title": "service logs" - }, - { - "location": "/reference/cli/#usage_31", - "text": "l0 service logs [--start MM/DD HH:MM ] [--end MM/DD HH:MM ] [--tail= N ] serviceName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_21", - "text": "serviceName \n The name of the Layer0 service for which you want to view logs.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_10", - "text": "--start MM/DD HH:MM \n The start of the time range to fetch logs. \n \n \n --end MM/DD HH:MM \n The end of the time range to fetch logs. \n \n \n --tail= N \n Display only the last N lines of the log.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#service-scale", - "text": "Use the scale subcommand to specify how many copies of an existing Layer0 service should run.", - "title": "service scale" - }, - { - "location": "/reference/cli/#usage_32", - "text": "l0 service scale environmentName:serviceName N", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_22", - "text": "environmentName \n The name of the Layer0 environment that contains the service that you want to scale. \n \n \n serviceName \n The name of the Layer0 service that you want to scale up. 
\n \n \n N \n The number of copies of the specified service that should be run.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#task", - "text": "A Layer0 task is a component of an environment. A task executes the contents of a Docker image, as specified in a deploy. A task differs from a service in that a task does not restart after exiting. Additionally, ports are not exposed when using a task. The task command is used with the following subcommands: create , delete , get , list , and logs .", - "title": "Task" - }, - { - "location": "/reference/cli/#task-create", - "text": "Use the create subcommand to create a Layer0 task.", - "title": "task create" - }, - { - "location": "/reference/cli/#usage_33", - "text": "l0 task create [--no-logs] [--copies copies ] environmentName taskName deployName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_23", - "text": "environmentName \n The name of the existing Layer0 environment in which you want to create the task. \n \n \n taskName \n A name for the task. \n \n \n deployName \n The name of an existing Layer0 deploy that the task should use.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_11", - "text": "--copies \n The number of copies of the task to run (default: 1) \n \n \n --no-logs \n Disable cloudwatch logging for the service", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#task-delete", - "text": "Use the delete subcommand to delete an existing Layer0 task.", - "title": "task delete" - }, - { - "location": "/reference/cli/#usage_34", - "text": "l0 task delete [ environmentName :] taskName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_24", - "text": "taskName \n The name of the Layer0 task that you want to delete.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-parameters", - "text": "[ environmentName :] \n The name of the Layer0 environment that contains the task. This parameter is only necessary if multiple environments contain tasks with exactly the same name.", - "title": "Optional parameters" - }, - { - "location": "/reference/cli/#additional-information_12", - "text": "Until the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#task-get", - "text": "Use the get subcommand to display information about an existing Layer0 task ( taskName ).", - "title": "task get" - }, - { - "location": "/reference/cli/#usage_35", - "text": "l0 task get [ environmentName :] taskName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_25", - "text": "taskName \n The name of a Layer0 task for which you want to see information.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#additional-information_13", - "text": "The value of taskName does not need to exactly match the name of an existing task. 
If multiple results are found that match the pattern you specified in taskName , then information about all matching tasks will be returned.", - "title": "Additional information" - }, - { - "location": "/reference/cli/#task-list", - "text": "Use the list subcommand to display a list of running tasks in your Layer0.", - "title": "task list" - }, - { - "location": "/reference/cli/#usage_36", - "text": "l0 task list", - "title": "Usage" - }, - { - "location": "/reference/cli/#task-logs", - "text": "Use the logs subcommand to display logs for a running Layer0 task.", - "title": "task logs" - }, - { - "location": "/reference/cli/#usage_37", - "text": "l0 task logs [--start MM/DD HH:MM ] [--end MM/DD HH:MM ] [--tail= N ] taskName", - "title": "Usage" - }, - { - "location": "/reference/cli/#required-parameters_26", - "text": "taskName \n The name of an existing Layer0 task.", - "title": "Required parameters" - }, - { - "location": "/reference/cli/#optional-arguments_12", - "text": "--start MM/DD HH:MM \n The start of the time range to fetch logs. \n \n \n --end MM/DD HH:MM \n The end of the time range to fetch logs. \n \n \n --tail= N \n Display only the last N lines of the log.", - "title": "Optional arguments" - }, - { - "location": "/reference/cli/#additional-information_14", - "text": "The value of taskName does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in taskName , then information about all matching tasks will be returned.", - "title": "Additional information" - }, - { - "location": "/reference/setup-cli/", - "text": "Layer0 Setup Reference\n#\n\n\nThe Layer0 Setup application (commonly called \nl0-setup\n) is used to provision, update, and destroy Layer0 instances.\n\n\n\n\nGeneral Usage\n#\n\n\nYou can use the \n-h, --help\n command to get information about the \nl0-setup\n tool:\n\n\n\n\nInit\n#\n\n\nThe \ninit\n command is used to initialize or reconfigure a Layer0 instance. \nThis command will prompt the user for inputs required to create/update a Layer0 instance. \nEach of the inputs can be specified through an optional flag.\n\n\nUsage\n#\n\n\n$ l0-setup init [options] \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\n\n\n--docker-path\n - Path to docker config.json file. \nThis is used to include private Docker Registry authentication for this Layer0 instance.\n\n\n--module-source\n - The source input variable is the path to the Terraform Layer0. \nBy default, this points to the Layer0 github repository. \nUsing values other than the default may result in undesired consequences.\n\n\n--version\n - The version input variable specifies the tag to use for the Layer0\nDocker images: \nquintilesims/l0-api\n and \nquintilesims/l0-runner\n.\n\n\n--aws-access-key\n - The access_key input variable is used to provision the AWS resources\nrequired for Layer0. \nThis corresponds to the Access Key ID portion of an AWS Access Key.\nIt is recommended this key has the \nAdministratorAccess\n policy. \n\n\n--aws-secret-key\n - The secret_key input variable is used to provision the AWS resources\nrequired for Layer0.
\nThis corresponds to the Secret Access Key portion of an AWS Access Key.\nIt is recommended this key has the \nAdministratorAccess\n policy.\n\n\n\n\n--aws-region\n - The region input variable specifies which region to provision the\nAWS resources required for Layer0. The following regions can be used:\n\n\n\n\nus-west-1\n\n\nus-west-2\n\n\nus-east-1\n\n\neu-west-1\n\n\n\n\n\n\n\n\n--aws-ssh-key-pair\n - The ssh_key_pair input variable specifies the name of the\nssh key pair to include in EC2 instances provisioned by Layer0. \nThis key pair must already exist in the AWS account. \nThe names of existing key pairs can be found in the EC2 dashboard.\n\n\n\n\n\n\n\n\nPlan\n#\n\n\nThe \nplan\n command is used to show the planned operation(s) to run during the next \napply\n on a Layer0 instance without actually executing any actions\n\n\nUsage\n#\n\n\n$ l0-setup plan \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\nThere are no options for this command\n\n\n\n\nApply\n#\n\n\nThe \napply\n command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the layer0 configuration to an S3 bucket unless the \n--push=false\n flag is set to false. Pushing the configuration to an S3 bucket requires aws credentials which if not set via the optional \n--aws-*\n flags, are read from the environment variables or a credentials file. \n\n\nUsage\n#\n\n\n$ l0-setup apply [options] \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\n\n\n--quick\n - Skips verification checks that normally run after \nterraform apply\n has completed\n\n\n--push\n - Skips uploading local Layer0 configuration files to an S3 bucket\n\n\n--aws-access-key\n - The Access Key ID portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-secret-key\n - The Secret Access Key portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-region\n - The region of the Layer0 instance. The default value is \nus-west-2\n. \n\n\n\n\n\n\nList\n#\n\n\nThe \nlist\n command is used to list local and remote Layer0 instances.\n\n\nUsage\n#\n\n\n$ l0-setup list [options]\n\n\n\n\nOptions\n#\n\n\n\n\n-l, --local\n - Show local Layer0 instances. This value is true by default.\n\n\n-r, --remote\n - Show remote Layer0 instances. This value is true by default. \n\n\n--aws-access-key\n - The Access Key ID portion of an AWS Access Key that has permissions to list S3 buckets. \nIf not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-secret-key\n - The Secret Access Key portion of an AWS Access Key that has permissions to list S3 buckets. \nIf not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-region\n - The region to list S3 buckets. The default value is \nus-west-2\n. \n\n\n\n\n\n\nPush\n#\n\n\nThe \npush\n command is used to back up your Layer0 configuration files to an S3 bucket.\n\n\nUsage\n#\n\n\n$ l0-setup push [options] \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\n\n\n--aws-access-key\n - The Access Key ID portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. 
\n\n\n--aws-secret-key\n - The Secret Access Key portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-region\n - The region of the Layer0 instance. The default value is \nus-west-2\n. \n\n\n\n\n\n\nPull\n#\n\n\nThe \npull\n command is used to copy Layer0 configuration files from an S3 bucket.\n\n\nUsage\n#\n\n\n$ l0-setup pull [options] \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\n\n\n--aws-access-key\n - The Access Key ID portion of an AWS Access Key that has permissions to pull from the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-secret-key\n - The Secret Access Key portion of an AWS Access Key that has permissions to pull from the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. \n\n\n--aws-region\n - The region of the Layer0 instance. The default value is \nus-west-2\n. \n\n\n\n\n\n\nEndpoint\n#\n\n\nThe \nendpoint\n command is used to show environment variables used to connect to a Layer0 instance\n\n\nUsage\n#\n\n\n$ l0-setup endpoint [options] \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\n\n\n-i, --insecure\n - Show environment variables that allow for insecure settings\n\n\n-d, --dev\n - Show environment variables that are required for local development\n\n\n-s, --syntax\n - Choose the syntax to display environment variables \n(choices: \nbash\n, \ncmd\n, \npowershell\n) (default: \nbash\n)\n\n\n\n\n\n\nDestroy\n#\n\n\nThe \ndestroy\n command is used to destroy all resources associated with a Layer0 instance.\n\n\n\n\nCaution\n\n\nDestroying a Layer0 instance cannot be undone; if you created backups of your Layer0 configuration using the \npush\n command, those backups will also be deleted when you run the \ndestroy\n command.\n\n\n\n\nUsage\n#\n\n\n$ l0-setup destroy [options] \ninstance_name\n \n\n\n\n\nOptions\n#\n\n\n\n\n--force\n - Skips confirmation prompt\n\n\n\n\n\n\nUpgrade\n#\n\n\nThe \nupgrade\n command is used to upgrade a Layer0 instance to a new version.\nYou will need to run an \napply\n after this command has completed. \n\n\nUsage\n#\n\n\n$ l0-setup upgrade [options] \ninstance_name\n \nversion\n\n\n\n\nOptions\n#\n\n\n\n\n--force\n - Skips confirmation prompt\n\n\n\n\n\n\nSet\n#\n\n\nThe \nset\n command is used to set input variable(s) for a Layer0 instance's Terraform module.\nThis command can be used to shorthand the \ninit\n and \nupgrade\n commands, \nand can also be used with custom Layer0 modules. \nYou will need to run an \napply\n after this command has completed.
\n\n\nUsage\n#\n\n\n$ l0-setup set [options] \ninstance_name\n\n\n\n\nExample Usage\n\n\n$ l0-setup set --input username=admin --input password=pass123 mylayer0\n\n\n\n\nOptions\n#\n\n\n\n\n--input\n - Specify an input using \nkey=val\n format", - "title": "Layer0 Setup CLI" - }, - { - "location": "/reference/setup-cli/#layer0-setup-reference", - "text": "The Layer0 Setup application (commonly called l0-setup ) is used to provision, update, and destroy Layer0 instances.", - "title": "Layer0 Setup Reference" - }, - { - "location": "/reference/setup-cli/#general-usage", - "text": "You can use the -h, --help command to get information about the l0-setup tool:", - "title": "General Usage" - }, - { - "location": "/reference/setup-cli/#init", - "text": "The init command is used to initialize or reconfigure a Layer0 instance. \nThis command will prompt the user for inputs required to create/update a Layer0 instance. \nEach of the inputs can be specified through an optional flag.", - "title": "Init" - }, - { - "location": "/reference/setup-cli/#usage", - "text": "$ l0-setup init [options] instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options", - "text": "--docker-path - Path to docker config.json file. \nThis is used to include private Docker Registry authentication for this Layer0 instance. --module-source - The source input variable is the path to the Terraform Layer0. \nBy default, this points to the Layer0 github repository. \nUsing values other than the default may result in undesired consequences. --version - The version input variable specifies the tag to use for the Layer0\nDocker images: quintilesims/l0-api and quintilesims/l0-runner . --aws-access-key - The access_key input variable is used to provision the AWS resources\nrequired for Layer0. \nThis corresponds to the Access Key ID portion of an AWS Access Key.\nIt is recommended this key has the AdministratorAccess policy. --aws-secret-key - The secret_key input variable is used to provision the AWS resources\nrequired for Layer0. \nThis corresponds to the Secret Access Key portion of an AWS Access Key.\nIt is recommended this key has the AdministratorAccess policy. --aws-region - The region input variable specifies which region to provision the\nAWS resources required for Layer0. The following regions can be used: us-west-1 us-west-2 us-east-1 eu-west-1 --aws-ssh-key-pair - The ssh_key_pair input variable specifies the name of the\nssh key pair to include in EC2 instances provisioned by Layer0. \nThis key pair must already exist in the AWS account. \nThe names of existing key pairs can be found in the EC2 dashboard.", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#plan", - "text": "The plan command is used to show the planned operation(s) to run during the next apply on a Layer0 instance without actually executing any actions", - "title": "Plan" - }, - { - "location": "/reference/setup-cli/#usage_1", - "text": "$ l0-setup plan instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_1", - "text": "There are no options for this command", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#apply", - "text": "The apply command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the layer0 configuration to an S3 bucket unless the --push=false flag is set to false.
Pushing the configuration to an S3 bucket requires aws credentials which if not set via the optional --aws-* flags, are read from the environment variables or a credentials file.", - "title": "Apply" - }, - { - "location": "/reference/setup-cli/#usage_2", - "text": "$ l0-setup apply [options] instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_2", - "text": "--quick - Skips verification checks that normally run after terraform apply has completed --push - Skips uploading local Layer0 configuration files to an S3 bucket --aws-access-key - The Access Key ID portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-secret-key - The Secret Access Key portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-region - The region of the Layer0 instance. The default value is us-west-2 .", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#list", - "text": "The list command is used to list local and remote Layer0 instances.", - "title": "List" - }, - { - "location": "/reference/setup-cli/#usage_3", - "text": "$ l0-setup list [options]", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_3", - "text": "-l, --local - Show local Layer0 instances. This value is true by default. -r, --remote - Show remote Layer0 instances. This value is true by default. --aws-access-key - The Access Key ID portion of an AWS Access Key that has permissions to list S3 buckets. \nIf not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-secret-key - The Secret Access Key portion of an AWS Access Key that has permissions to list S3 buckets. \nIf not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-region - The region to list S3 buckets. The default value is us-west-2 .", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#push", - "text": "The push command is used to back up your Layer0 configuration files to an S3 bucket.", - "title": "Push" - }, - { - "location": "/reference/setup-cli/#usage_4", - "text": "$ l0-setup push [options] instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_4", - "text": "--aws-access-key - The Access Key ID portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-secret-key - The Secret Access Key portion of an AWS Access Key that has permissions to push to the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-region - The region of the Layer0 instance. The default value is us-west-2 .", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#pull", - "text": "The pull command is used to copy Layer0 configuration files from an S3 bucket.", - "title": "Pull" - }, - { - "location": "/reference/setup-cli/#usage_5", - "text": "$ l0-setup pull [options] instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_5", - "text": "--aws-access-key - The Access Key ID portion of an AWS Access Key that has permissions to pull from the Layer0 instances's S3 bucket.
If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-secret-key - The Secret Access Key portion of an AWS Access Key that has permissions to pull from the Layer0 instances's S3 bucket. If not specified, the application will attempt to use any AWS credentials used by the AWS CLI. --aws-region - The region of the Layer0 instance. The default value is us-west-2 .", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#endpoint", - "text": "The endpoint command is used to show environment variables used to connect to a Layer0 instance", - "title": "Endpoint" - }, - { - "location": "/reference/setup-cli/#usage_6", - "text": "$ l0-setup endpoint [options] instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_6", - "text": "-i, --insecure - Show environment variables that allow for insecure settings -d, --dev - Show environment variables that are required for local development -s, --syntax - Choose the syntax to display environment variables \n(choices: bash , cmd , powershell ) (default: bash )", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#destroy", - "text": "The destroy command is used to destroy all resources associated with a Layer0 instance. Caution Destroying a Layer0 instance cannot be undone; if you created backups of your Layer0 configuration using the push command, those backups will also be deleted when you run the destroy command.", - "title": "Destroy" - }, - { - "location": "/reference/setup-cli/#usage_7", - "text": "$ l0-setup destroy [options] instance_name", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_7", - "text": "--force - Skips confirmation prompt", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#upgrade", - "text": "The upgrade command is used to upgrade a Layer0 instance to a new version.\nYou will need to run an apply after this command has completed.", - "title": "Upgrade" - }, - { - "location": "/reference/setup-cli/#usage_8", - "text": "$ l0-setup upgrade [options] instance_name version", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_8", - "text": "--force - Skips confirmation prompt", - "title": "Options" - }, - { - "location": "/reference/setup-cli/#set", - "text": "The set command is used to set input variable(s) for a Layer0 instance's Terraform module.\nThis command can be used to shorthand the init and upgrade commands, \nand can also be used with custom Layer0 modules.
\nYou will need to run an apply after this command has completed.", - "title": "Set" - }, - { - "location": "/reference/setup-cli/#usage_9", - "text": "$ l0-setup set [options] instance_name Example Usage $ l0-setup set --input username=admin --input password=pass123 mylayer0", - "title": "Usage" - }, - { - "location": "/reference/setup-cli/#options_9", - "text": "--input - Specify an input using key=val format", - "title": "Options" - }, - { - "location": "/reference/terraform_introduction/", - "text": "Introduction to Terraform\n#\n\n\nWhat does Terraform do?\n#\n\n\nTerraform is a powerful orchestration tool for creating, updating, deleting, and otherwise managing infrastructure in an easy-to-understand, declarative manner.\nTerraform's \ndocumentation\n is very good, but at a glance:\n\n\nBe Declarative -\n\nSpecify desired infrastructure results in Terraform (\n*.tf\n) files, and let Terraform do the heavy work of figuring out how to make that specification a reality.\n\n\nScry the Future -\n\nUse \nterraform plan\n to see a list of everything that Terraform \nwould\n do without actually making those changes.\n\n\nVersion Infrastructure -\n\nCheck Terraform files into a VCS to track changes to and manage versions of your infrastructure.\n\n\nWhy Terraform?\n#\n\n\nWhy did we latch onto Terraform instead of something like CloudFormation?\n\n\nCloud-Agnostic -\n\nUnlike CloudFormation, Terraform is able to incorporate different \nresource providers\n to manage infrastructure across multiple cloud services (not just AWS).\n\n\nCustom Providers -\n\nTerraform can be extended to manage tools that don't come natively through use of custom providers.\nWe wrote a \nLayer0 provider\n so that Terraform can manage Layer0 resources in addition to tools and resources and infrastructure beyond Layer0's scope.\n\n\nTerraform has some \nthings to say\n on the matter as well.\n\n\nAdvantages Versus Layer0 CLI?\n#\n\n\nWhy should you move from using (or scripting) the Layer0 CLI directly?\n\n\nReduce Fat-Fingering Mistakes -\n\nCreating Terraform files (and using \nterraform plan\n) allows you to review your deployment and catch errors.\nExecuting Layer0 CLI commands one-by-one is tiresome, non-transportable, and a process ripe for typos.\n\n\nGo Beyond Layer0 -\n\nRetain the benefits of leveraging Layer0's concepts and resources using our \nprovider\n, but also gain the ability to orchestrate resources and tools beyond the CLI's scope.\n\n\nHow do I get Terraform?\n#\n\n\nCheck out Terraform's \ndocumentation\n on the subject.", - "title": "Terraform" - }, - { - "location": "/reference/terraform_introduction/#introduction-to-terraform", - "text": "", - "title": "Introduction to Terraform" - }, - { - "location": "/reference/terraform_introduction/#what-does-terraform-do", - "text": "Terraform is a powerful orchestration tool for creating, updating, deleting, and otherwise managing infrastructure in an easy-to-understand, declarative manner.\nTerraform's documentation is very good, but at a glance: Be Declarative - \nSpecify desired infrastructure results in Terraform ( *.tf ) files, and let Terraform do the heavy work of figuring out how to make that specification a reality. Scry the Future - \nUse terraform plan to see a list of everything that Terraform would do without actually making those changes. Version Infrastructure - \nCheck Terraform files into a VCS to track changes to and manage versions of your infrastructure.", - "title": "What does Terraform do?" 
- }, - { - "location": "/reference/terraform_introduction/#why-terraform", - "text": "Why did we latch onto Terraform instead of something like CloudFormation? Cloud-Agnostic - \nUnlike CloudFormation, Terraform is able to incorporate different resource providers to manage infrastructure across multiple cloud services (not just AWS). Custom Providers - \nTerraform can be extended to manage tools that don't come natively through use of custom providers.\nWe wrote a Layer0 provider so that Terraform can manage Layer0 resources in addition to tools and resources and infrastructure beyond Layer0's scope. Terraform has some things to say on the matter as well.", - "title": "Why Terraform?" - }, - { - "location": "/reference/terraform_introduction/#advantages-versus-layer0-cli", - "text": "Why should you move from using (or scripting) the Layer0 CLI directly? Reduce Fat-Fingering Mistakes - \nCreating Terraform files (and using terraform plan ) allows you to review your deployment and catch errors.\nExecuting Layer0 CLI commands one-by-one is tiresome, non-transportable, and a process ripe for typos. Go Beyond Layer0 - \nRetain the benefits of leveraging Layer0's concepts and resources using our provider , but also gain the ability to orchestrate resources and tools beyond the CLI's scope.", - "title": "Advantages Versus Layer0 CLI?" - }, - { - "location": "/reference/terraform_introduction/#how-do-i-get-terraform", - "text": "Check out Terraform's documentation on the subject.", - "title": "How do I get Terraform?" - }, - { - "location": "/reference/terraform-plugin/", - "text": "Layer0 Terraform Provider Reference\n#\n\n\nTerraform is an open-source tool for provisioning and managing infrastructure.\nIf you are new to Terraform, we recommend checking out their \ndocumentation\n.\n\n\nLayer0 has built a custom \nprovider\n for Layer0.\nThis provider allows users to create, manage, and update Layer0 entities using Terraform.\n\n\nPrerequisites\n#\n\n\n\n\nTerraform v0.9.4+\n (\ndownload\n), accessible in your system path.\n\n\n\n\nInstall\n#\n\n\nDownload a Layer0 v0.8.4+ \nrelease\n.\nThe Terraform plugin binary is located in the release zip file as \nterraform-provider-layer0\n.\nCopy this \nterraform-provider-layer0\n binary into the same directory as your Terraform binary - and you're done!\n\n\nFor further information, see Terraform's documentation on installing a Terraform plugin \nhere\n.\n\n\nGetting Started\n#\n\n\n\n\nCheckout the \nTerraform\n section of the Guestbook walkthrough \nhere\n.\n\n\nWe've added some tips and links to helpful resources in the \nBest Practices\n section below.\n\n\n\n\n\n\nProvider\n#\n\n\nThe Layer0 provider is used to interact with a Layer0 API.\nThe provider needs to be configured with the proper credentials before it can be used.\n\n\nExample Usage\n#\n\n\n# Add 'endpoint' and 'token' variables\nvariable \nendpoint\n {}\n\nvariable \ntoken\n {}\n\n# Configure the layer0 provider\nprovider \nlayer0\n {\n endpoint = \n${var.endpoint}\n\n token = \n${var.token}\n\n skip_ssl_verify = true\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nConfiguration\n\n\nThe \nendpoint\n and \ntoken\n variables for your layer0 api can be found using the \nl0-setup endpoint\n command\n\n\n\n\n\n\nendpoint\n - (Required) The endpoint of the layer0 api\n\n\ntoken\n - (Required) The authentication token for the layer0 api\n\n\nskip_ssl_verify\n - (Optional) If true, ssl certificate mismatch warnings will be ignored\n\n\n\n\n\n\nAPI Data 
Source\n#\n\n\nThe API data source is used to extract useful read-only variables from the Layer0 API.\n\n\nExample Usage\n#\n\n\n# Configure the api data source\ndata \nlayer0_api\n \nconfig\n {}\n\n# Output the layer0 vpc id\noutput \nvpc id\n {\n val = \n${data.layer0_api.config.vpc_id}\n\n}\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nprefix\n - The prefix of the layer0 instance\n\n\nvpc_id\n - The vpc id of the layer0 instance\n\n\npublic_subnets\n - A list containing the 2 public subnet ids in the layer0 vpc\n\n\nprivate_subnets\n - A list containing the 2 private subnet ids in the layer0 vpc\n\n\n\n\n\n\nDeploy Data Source\n#\n\n\nThe Deploy data source is used to extract Layer0 Deploy attributes.\n\n\nExample Usage\n#\n\n\n# Configure the deploy data source\ndata \nlayer0_deploy\n \ndpl\n {\n name = \nmy-deploy\n\n version = \n1\n\n}\n\n# Output the layer0 deploy id\noutput \ndeploy_id\n {\n val = \n${data.layer0_deploy.dpl.id}\n\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the deploy\n\n\nversion\n - (Required) The version of the deploy\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nname\n - The name of the deploy\n\n\nversion\n - The version of the deploy\n\n\nid\n - The id of the deploy\n\n\n\n\n\n\nEnvironment Data Source\n#\n\n\nThe Environment data source is used to extract Layer0 Environment attributes.\n\n\nExample Usage\n#\n\n\n# Configure the environment data source\ndata \nlayer0_environment\n \nenv\n {\n name = \nmy-environment\n\n}\n\n# Output the layer0 environment id\noutput \nenvironment_id\n {\n val = \n${data.layer0_environment.env.id}\n\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the environment\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the environment\n\n\nname\n - The name of the environment\n\n\nsize\n - The size of the instances in the environment\n\n\nmin_count\n - The current number instances in the environment\n\n\nos\n - The operating system used for the environment\n\n\nami\n - The AMI ID used for the environment\n\n\n\n\n\n\nLoad Balancer Data Source\n#\n\n\nThe Load Balancer data source is used to extract Layer0 Load Balancer attributes.\n\n\nExample Usage\n#\n\n\n# Configure the load balancer source\ndata \nlayer0_load_balancer\n \nlb\n {\n name = \nmy-loadbalancer\n\n environment_id = \n${data.layer0_environment.env.environment_id}\n\n}\n\n# Output the layer0 load balancer id\noutput \nload_balancer_id\n {\n val = \n${data.layer0_load_balancer.lb.id}\n\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (required) The name of the load balancer\n\n\nenvironment_id\n - (required) The id of the environment the load balancer exists in\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the load balancer\n\n\nname\n - The name of the load balancer\n\n\nenvironment_id\n - The id of the environment the load balancer exists in\n\n\nenvironment_name\n - The name of the environment the load balancer exists in\n\n\nprivate\n - Whether or not the load balancer is private\n\n\nurl\n - The URL of the load balancer\n\n\n\n\n\n\nService Data Source\n#\n\n\nThe Service data source is used to extract Layer0 Service attributes.\n\n\nExample Usage\n#\n\n\n# Configure the service data source\ndata 
\nlayer0_service\n \nsvc\n {\n name = \nmy-service\n\n environment_id = \n${data.layer0_environment.env.environment_id}\n\n}\n\n# Output the layer0 service id\noutput \nservice_id\n {\n val = \n${data.layer0_service.svc.id}\n\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (required) The name of the service\n\n\nenvironment_id\n - (required) The id of the environment the service exists in\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the service\n\n\nname\n - The name of the service\n\n\nenvironment_id\n - The id of the environment the service exists in\n\n\nenvironment_name\n - The name of the environment the service exists in\n\n\nscale\n - The current desired scale of the service\n\n\n\n\n\n\nDeploy Resource\n#\n\n\nProvides a Layer0 Deploy.\n\n\nPerforming variable substitution inside of your deploy's json file (typically named \nDockerrun.aws.json\n) can be done through Terraform's \ntemplate_file\n.\nFor a working example, please see the sample \nGuestbook\n application\n\n\nExample Usage\n#\n\n\n# Configure the deploy template\ndata \ntemplate_file\n \nguestbook\n {\n template = \n${file(\nDockerrun.aws.json\n)}\n\n vars {\n docker_image_tag = \nlatest\n\n }\n}\n\n# Create a deploy using the rendered template\nresource \nlayer0_deploy\n \nguestbook\n {\n name = \nguestbook\n\n content = \n${data.template_file.guestbook.rendered}\n\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the deploy\n\n\ncontent\n - (Required) The content of the deploy\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the deploy\n\n\nname\n - The name of the deploy\n\n\nversion\n - The version number of the deploy\n\n\n\n\n\n\nEnvironment Resource\n#\n\n\nProvides a Layer0 Environment\n\n\nExample Usage\n#\n\n\n# Create a new environment\nresource \nlayer0_environment\n \ndemo\n {\n name = \ndemo\n\n size = \nm3.medium\n\n min_count = 0\n user_data = \necho hello, world\n\n os = \nlinux\n\n ami = \nami123\n\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the environment\n\n\nsize\n - (Optional, Default: \"m3.medium\") The size of the instances in the environment.\nAvailable instance sizes can be found \nhere\n\n\nmin_count\n - (Optional, Default: 0) The minimum number of instances allowed in the environment\n\n\nuser-data\n - (Optional) The user data template to use for the environment's autoscaling group.\nSee the \ncli reference\n for the default template.\n\n\nos\n - (Optional, Default: \"linux\") Specifies the type of operating system used in the environment.\nOptions are \"linux\" or \"windows\".\n\n\nami\n - (Optional) A custom AMI ID to use in the environment. 
\nIf not specified, Layer0 will use its default AMI ID for the specified operating system.\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the environment\n\n\nname\n - The name of the environment\n\n\nsize\n - The size of the instances in the environment\n\n\ncluster_count\n - The current number instances in the environment\n\n\nsecurity_group_id\n - The ID of the environment's security group\n\n\nos\n - The operating system used for the environment\n\n\nami\n - The AMI ID used for the environment\n\n\n\n\n\n\nLoad Balancer Resource\n#\n\n\nProvides a Layer0 Load Balancer\n\n\nExample Usage\n#\n\n\n# Create a new load balancer\nresource \nlayer0_load_balancer\n \nguestbook\n {\n name = \nguestbook\n\n environment = \ndemo123\n\n private = false\n\n port {\n host_port = 80\n container_port = 80\n protocol = \nhttp\n\n }\n\n port {\n host_port = 443\n container_port = 443\n protocol = \nhttps\n\n certificate = \ncert\n\n }\n\n health_check {\n target = \ntcp:80\n\n interval = 30\n timeout = 5\n healthy_threshold = 2\n unhealthy_threshold = 2\n }\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the load balancer\n\n\nenvironment\n - (Required) The id of the environment to place the load balancer inside of\n\n\nprivate\n - (Optional) If true, the load balancer will not be exposed to the public internet\n\n\nport\n - (Optional, Default: 80:80/tcp) A list of port blocks. Ports documented below\n\n\nhealth_check\n - (Optional, Default: {\"TCP:80\" 30 5 2 2}) A health_check block. Health check documented below\n\n\n\n\nPorts (\nport\n) support the following:\n\n\n\n\nhost_port\n - (Required) The port on the load balancer to listen on\n\n\ncontainer_port\n - (Required) The port on the docker container to route to\n\n\nprotocol\n - (Required) The protocol to listen on. Valid values are \nHTTP, HTTPS, TCP, or SSL\n\n\ncertificate\n - (Optional) The name of an SSL certificate. Only required if the \nHTTP\n or \nSSL\n protocol is used.\n\n\n\n\nHealthcheck (\nhealth_check\n) supports the following:\n\n\n\n\ntarget\n - (Required) The target of the check. 
Valid pattern is \"${PROTOCOL}:${PORT}${PATH}\", where PROTOCOL values are:\n\n\nHTTP\n, \nHTTPS\n - PORT and PATH are required\n\n\nTCP\n, \nSSL\n - PORT is required, PATH is not supported\n\n\n\n\n\n\ninterval\n - (Required) The interval between checks.\n\n\ntimeout\n - (Required) The length of time before the check times out.\n\n\nhealthy_threshold\n - (Required) The number of checks before the instance is declared healthy.\n\n\nunhealthy_threshold\n - (Required) The number of checks before the instance is declared unhealthy.\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the load balancer\n\n\nname\n - The name of the load balancer\n\n\nenvironment\n - The id of the environment the load balancer exists in\n\n\nprivate\n - Whether or not the load balancer is private\n\n\nurl\n - The URL of the load balancer\n\n\n\n\n\n\nService Resource\n#\n\n\nProvides a Layer0 Service\n\n\nExample Usage\n#\n\n\n# Create a new service\nresource \nlayer0_service\n \nguestbook\n {\n name = \nguestbook\n\n environment = \nenvironment123\n\n deploy = \ndeploy123\n\n load_balancer = \nloadbalancer123\n\n scale = 3\n}\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the service\n\n\nenvironment\n - (Required) The id of the environment to place the service inside of\n\n\ndeploy\n - (Required) The id of the deploy for the service to run\n\n\nload_balancer\n (Optional) The id of the load balancer to place the service behind\n\n\nscale\n (Optional, Default: 1) The number of copies of the service to run\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the service\n\n\nname\n - The name of the service\n\n\nenvironment\n - The id of the environment the service exists in\n\n\ndeploy\n - The id of the deploy the service is running\n\n\nload_balancer\n - The id of the load balancer the service is behind (if \nload_balancer\n was set)\n\n\nscale\n - The current desired scale of the service\n\n\n\n\n\n\nBest Practices\n#\n\n\n\n\nAlways run \nTerraform plan\n before \nterraform apply\n.\nThis will show you what action(s) Terraform plans to make before actually executing them.\n\n\nUse \nvariables\n to reference secrets.\nSecrets can be placed in a file named \nTerraform.tfvars\n, or by setting \nTF_VAR_*\n environment variables.\nMore information can be found \nhere\n.\n\n\nUse Terraform's \nremote\n command to backup and sync your \nterraform.tfstate\n file across different members in your organization.\nTerraform has documentation for using S3 as a backend \nhere\n.\n\n\nTerraform \nmodules\n allow you to define and consume reusable components.\n\n\nExample configurations can be found \nhere", - "title": "Layer0 Terraform Plugin" - }, - { - "location": "/reference/terraform-plugin/#layer0-terraform-provider-reference", - "text": "Terraform is an open-source tool for provisioning and managing infrastructure.\nIf you are new to Terraform, we recommend checking out their documentation . 
Layer0 has built a custom provider for Layer0.\nThis provider allows users to create, manage, and update Layer0 entities using Terraform.", - "title": "Layer0 Terraform Provider Reference" - }, - { - "location": "/reference/terraform-plugin/#prerequisites", - "text": "Terraform v0.9.4+ ( download ), accessible in your system path.", - "title": "Prerequisites" - }, - { - "location": "/reference/terraform-plugin/#install", - "text": "Download a Layer0 v0.8.4+ release .\nThe Terraform plugin binary is located in the release zip file as terraform-provider-layer0 .\nCopy this terraform-provider-layer0 binary into the same directory as your Terraform binary - and you're done! For further information, see Terraform's documentation on installing a Terraform plugin here .", - "title": "Install" - }, - { - "location": "/reference/terraform-plugin/#getting-started", - "text": "Checkout the Terraform section of the Guestbook walkthrough here . We've added some tips and links to helpful resources in the Best Practices section below.", - "title": "Getting Started" - }, - { - "location": "/reference/terraform-plugin/#provider", - "text": "The Layer0 provider is used to interact with a Layer0 API.\nThe provider needs to be configured with the proper credentials before it can be used.", - "title": "Provider" - }, - { - "location": "/reference/terraform-plugin/#example-usage", - "text": "# Add 'endpoint' and 'token' variables\nvariable endpoint {}\n\nvariable token {}\n\n# Configure the layer0 provider\nprovider layer0 {\n endpoint = ${var.endpoint} \n token = ${var.token} \n skip_ssl_verify = true\n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference", - "text": "The following arguments are supported: Configuration The endpoint and token variables for your layer0 api can be found using the l0-setup endpoint command endpoint - (Required) The endpoint of the layer0 api token - (Required) The authentication token for the layer0 api skip_ssl_verify - (Optional) If true, ssl certificate mismatch warnings will be ignored", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#api-data-source", - "text": "The API data source is used to extract useful read-only variables from the Layer0 API.", - "title": "API Data Source" - }, - { - "location": "/reference/terraform-plugin/#example-usage_1", - "text": "# Configure the api data source\ndata layer0_api config {}\n\n# Output the layer0 vpc id\noutput vpc id {\n val = ${data.layer0_api.config.vpc_id} \n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference", - "text": "The following attributes are exported: prefix - The prefix of the layer0 instance vpc_id - The vpc id of the layer0 instance public_subnets - A list containing the 2 public subnet ids in the layer0 vpc private_subnets - A list containing the 2 private subnet ids in the layer0 vpc", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#deploy-data-source", - "text": "The Deploy data source is used to extract Layer0 Deploy attributes.", - "title": "Deploy Data Source" - }, - { - "location": "/reference/terraform-plugin/#example-usage_2", - "text": "# Configure the deploy data source\ndata layer0_deploy dpl {\n name = my-deploy \n version = 1 \n}\n\n# Output the layer0 deploy id\noutput deploy_id {\n val = ${data.layer0_deploy.dpl.id} \n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_1", - 
"text": "The following arguments are supported: name - (Required) The name of the deploy version - (Required) The version of the deploy", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_1", - "text": "The following attributes are exported: name - The name of the deploy version - The version of the deploy id - The id of the deploy", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#environment-data-source", - "text": "The Environment data source is used to extract Layer0 Environment attributes.", - "title": "Environment Data Source" - }, - { - "location": "/reference/terraform-plugin/#example-usage_3", - "text": "# Configure the environment data source\ndata layer0_environment env {\n name = my-environment \n}\n\n# Output the layer0 environment id\noutput environment_id {\n val = ${data.layer0_environment.env.id} \n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_2", - "text": "The following arguments are supported: name - (Required) The name of the environment", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_2", - "text": "The following attributes are exported: id - The id of the environment name - The name of the environment size - The size of the instances in the environment min_count - The current number instances in the environment os - The operating system used for the environment ami - The AMI ID used for the environment", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#load-balancer-data-source", - "text": "The Load Balancer data source is used to extract Layer0 Load Balancer attributes.", - "title": "Load Balancer Data Source" - }, - { - "location": "/reference/terraform-plugin/#example-usage_4", - "text": "# Configure the load balancer source\ndata layer0_load_balancer lb {\n name = my-loadbalancer \n environment_id = ${data.layer0_environment.env.environment_id} \n}\n\n# Output the layer0 load balancer id\noutput load_balancer_id {\n val = ${data.layer0_load_balancer.lb.id} \n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_3", - "text": "The following arguments are supported: name - (required) The name of the load balancer environment_id - (required) The id of the environment the load balancer exists in", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_3", - "text": "The following attributes are exported: id - The id of the load balancer name - The name of the load balancer environment_id - The id of the environment the load balancer exists in environment_name - The name of the environment the load balancer exists in private - Whether or not the load balancer is private url - The URL of the load balancer", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#service-data-source", - "text": "The Service data source is used to extract Layer0 Service attributes.", - "title": "Service Data Source" - }, - { - "location": "/reference/terraform-plugin/#example-usage_5", - "text": "# Configure the service data source\ndata layer0_service svc {\n name = my-service \n environment_id = ${data.layer0_environment.env.environment_id} \n}\n\n# Output the layer0 service id\noutput service_id {\n val = ${data.layer0_service.svc.id} \n}", - "title": "Example Usage" - }, - { - "location": 
"/reference/terraform-plugin/#argument-reference_4", - "text": "The following arguments are supported: name - (required) The name of the service environment_id - (required) The id of the environment the service exists in", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_4", - "text": "The following attributes are exported: id - The id of the service name - The name of the service environment_id - The id of the environment the service exists in environment_name - The name of the environment the service exists in scale - The current desired scale of the service", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#deploy-resource", - "text": "Provides a Layer0 Deploy. Performing variable substitution inside of your deploy's json file (typically named Dockerrun.aws.json ) can be done through Terraform's template_file .\nFor a working example, please see the sample Guestbook application", - "title": "Deploy Resource" - }, - { - "location": "/reference/terraform-plugin/#example-usage_6", - "text": "# Configure the deploy template\ndata template_file guestbook {\n template = ${file( Dockerrun.aws.json )} \n vars {\n docker_image_tag = latest \n }\n}\n\n# Create a deploy using the rendered template\nresource layer0_deploy guestbook {\n name = guestbook \n content = ${data.template_file.guestbook.rendered} \n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_5", - "text": "The following arguments are supported: name - (Required) The name of the deploy content - (Required) The content of the deploy", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_5", - "text": "The following attributes are exported: id - The id of the deploy name - The name of the deploy version - The version number of the deploy", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#environment-resource", - "text": "Provides a Layer0 Environment", - "title": "Environment Resource" - }, - { - "location": "/reference/terraform-plugin/#example-usage_7", - "text": "# Create a new environment\nresource layer0_environment demo {\n name = demo \n size = m3.medium \n min_count = 0\n user_data = echo hello, world \n os = linux \n ami = ami123 \n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_6", - "text": "The following arguments are supported: name - (Required) The name of the environment size - (Optional, Default: \"m3.medium\") The size of the instances in the environment.\nAvailable instance sizes can be found here min_count - (Optional, Default: 0) The minimum number of instances allowed in the environment user-data - (Optional) The user data template to use for the environment's autoscaling group.\nSee the cli reference for the default template. os - (Optional, Default: \"linux\") Specifies the type of operating system used in the environment.\nOptions are \"linux\" or \"windows\". ami - (Optional) A custom AMI ID to use in the environment. 
\nIf not specified, Layer0 will use its default AMI ID for the specified operating system.", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_6", - "text": "The following attributes are exported: id - The id of the environment name - The name of the environment size - The size of the instances in the environment cluster_count - The current number instances in the environment security_group_id - The ID of the environment's security group os - The operating system used for the environment ami - The AMI ID used for the environment", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#load-balancer-resource", - "text": "Provides a Layer0 Load Balancer", - "title": "Load Balancer Resource" - }, - { - "location": "/reference/terraform-plugin/#example-usage_8", - "text": "# Create a new load balancer\nresource layer0_load_balancer guestbook {\n name = guestbook \n environment = demo123 \n private = false\n\n port {\n host_port = 80\n container_port = 80\n protocol = http \n }\n\n port {\n host_port = 443\n container_port = 443\n protocol = https \n certificate = cert \n }\n\n health_check {\n target = tcp:80 \n interval = 30\n timeout = 5\n healthy_threshold = 2\n unhealthy_threshold = 2\n }\n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_7", - "text": "The following arguments are supported: name - (Required) The name of the load balancer environment - (Required) The id of the environment to place the load balancer inside of private - (Optional) If true, the load balancer will not be exposed to the public internet port - (Optional, Default: 80:80/tcp) A list of port blocks. Ports documented below health_check - (Optional, Default: {\"TCP:80\" 30 5 2 2}) A health_check block. Health check documented below Ports ( port ) support the following: host_port - (Required) The port on the load balancer to listen on container_port - (Required) The port on the docker container to route to protocol - (Required) The protocol to listen on. Valid values are HTTP, HTTPS, TCP, or SSL certificate - (Optional) The name of an SSL certificate. Only required if the HTTP or SSL protocol is used. Healthcheck ( health_check ) supports the following: target - (Required) The target of the check. Valid pattern is \"${PROTOCOL}:${PORT}${PATH}\", where PROTOCOL values are: HTTP , HTTPS - PORT and PATH are required TCP , SSL - PORT is required, PATH is not supported interval - (Required) The interval between checks. timeout - (Required) The length of time before the check times out. healthy_threshold - (Required) The number of checks before the instance is declared healthy. 
unhealthy_threshold - (Required) The number of checks before the instance is declared unhealthy.", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_7", - "text": "The following attributes are exported: id - The id of the load balancer name - The name of the load balancer environment - The id of the environment the load balancer exists in private - Whether or not the load balancer is private url - The URL of the load balancer", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#service-resource", - "text": "Provides a Layer0 Service", - "title": "Service Resource" - }, - { - "location": "/reference/terraform-plugin/#example-usage_9", - "text": "# Create a new service\nresource layer0_service guestbook {\n name = guestbook \n environment = environment123 \n deploy = deploy123 \n load_balancer = loadbalancer123 \n scale = 3\n}", - "title": "Example Usage" - }, - { - "location": "/reference/terraform-plugin/#argument-reference_8", - "text": "The following arguments are supported: name - (Required) The name of the service environment - (Required) The id of the environment to place the service inside of deploy - (Required) The id of the deploy for the service to run load_balancer (Optional) The id of the load balancer to place the service behind scale (Optional, Default: 1) The number of copies of the service to run", - "title": "Argument Reference" - }, - { - "location": "/reference/terraform-plugin/#attribute-reference_8", - "text": "The following attributes are exported: id - The id of the service name - The name of the service environment - The id of the environment the service exists in deploy - The id of the deploy the service is running load_balancer - The id of the load balancer the service is behind (if load_balancer was set) scale - The current desired scale of the service", - "title": "Attribute Reference" - }, - { - "location": "/reference/terraform-plugin/#best-practices", - "text": "Always run Terraform plan before terraform apply .\nThis will show you what action(s) Terraform plans to make before actually executing them. Use variables to reference secrets.\nSecrets can be placed in a file named Terraform.tfvars , or by setting TF_VAR_* environment variables.\nMore information can be found here . Use Terraform's remote command to backup and sync your terraform.tfstate file across different members in your organization.\nTerraform has documentation for using S3 as a backend here . Terraform modules allow you to define and consume reusable components. Example configurations can be found here", - "title": "Best Practices" - }, - { - "location": "/reference/updateservice/", - "text": "Updating a Layer0 service\n#\n\n\nThere are three methods of updating an existing Layer0 service. The first method is to update the existing Deploy to refer to a new Docker task definition. The second method is to create a new Service that uses the same Loadbalancer. The third method is to create both a new Loadbalancer and a new Service.\n\n\nThere are advantages and disadvantages to each of these methods. The following sections discuss the advantages and disadvantages of using each method, and include procedures for implementing each method.\n\n\nMethod 1: Refer to a new task definition\n#\n\n\nThis method of updating a Layer0 application is the easiest to implement, because you do not need to rescale the Service or modify the Loadbalancer. 
This method is completely transparent to all other components of the application, and using this method does not involve any downtime.\n\n\nThe disadvantage of using this method is that you cannot perform A/B testing of the old and new services, and you cannot control which traffic goes to the old service and which goes to the new one.\n\n\nTo replace a Deploy to refer to a new task definition:\n\n\n\n\nAt the command line, type the following to create a new Deploy: \nl0 deploy create [pathToTaskDefinition] [deployName]\nNote that if \n[deployName]\n already exists, this step will create a new version of that Deploy.\n\n\nType the following to update the existing Service: \nl0 service update [existingServiceName] [deployName]\nBy default, the Service you specify in this command will refer to the latest version of \n[deployName]\n, if multiple versions of the Deploy exist.\nNote\nIf you want to refer to a specific version of the Deploy, type the following command instead of the one shown above: \nl0 service update [serviceName] [deployName]:[deployVersion]\n\n\n\n\nMethod 2: Create a new Deploy and Service using the same Loadbalancer\n#\n\n\nThis method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. This method also you allows you to re-scale the service if necessary, using the \nl0 service scale\n command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates.\n\n\nThe disadvantage of using this method is that you cannot control the routing of traffic between the old and new versions of the application.\n\n\nTo create a new Deploy and Service:\n\n\n\n\nAt the command line, type the following to create a new Deploy (or a new version of the Deploy, if \n[deployName]\n already exists):\n \nl0 deploy create [pathToTaskDefinition] [deployName]\n\n\nType the following command to create a new Service that refers to \n[deployName]\n behind an existing Loadbalancer named \n[loadbalancerName]\n:\n \nl0 service create --loadbalancer [loadbalancerName] [environmentName] [deployName]\n\n\nCheck to make sure that the new Service is working as expected. If it is, and you do not want to keep the old Service, type the following command to delete the old Service: \nl0 service delete [oldServiceName]\n\n\n\n\nMethod 3: Create a new Deploy, Loadbalancer and Service\n#\n\n\nThe final method of updating a Layer0 service is to create an entirely new Deploy, Loadbalancer and Service. 
This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services.\n\n\nThe disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Loadbalancer.\n\n\nTo create a new Deploy, Loadbalancer and Service:\n\n\n\n\nAt the command line, type the following command to create a new Deploy:\nl0 deploy create [pathToTaskDefinition] [deployName]\n\n\nType the following command to create a new Loadbalancer:\n \nl0 loadbalancer create --port [portNumber] [environmentName] [loadbalancerName] [deployName]\nNote\nThe value of \n[loadbalancerName]\n in the above command must be unique.\n\n\nType the following command to create a new Service: \nl0 service create --loadbalancer [loadBalancerName] [environmentName] [serviceName] [deployName]\nNote\nThe value of \n[serviceName]\n in the above command must be unique.\n\n\nImplement a method of routing traffic between the old and new Services, such as \nHAProxy\n or \nConsul\n.", - "title": "Updating a Service" - }, - { - "location": "/reference/updateservice/#updating-a-layer0-service", - "text": "There are three methods of updating an existing Layer0 service. The first method is to update the existing Deploy to refer to a new Docker task definition. The second method is to create a new Service that uses the same Loadbalancer. The third method is to create both a new Loadbalancer and a new Service. There are advantages and disadvantages to each of these methods. The following sections discuss the advantages and disadvantages of using each method, and include procedures for implementing each method.", - "title": "Updating a Layer0 service" - }, - { - "location": "/reference/updateservice/#method-1-refer-to-a-new-task-definition", - "text": "This method of updating a Layer0 application is the easiest to implement, because you do not need to rescale the Service or modify the Loadbalancer. This method is completely transparent to all other components of the application, and using this method does not involve any downtime. The disadvantage of using this method is that you cannot perform A/B testing of the old and new services, and you cannot control which traffic goes to the old service and which goes to the new one. To replace a Deploy to refer to a new task definition: At the command line, type the following to create a new Deploy: l0 deploy create [pathToTaskDefinition] [deployName] Note that if [deployName] already exists, this step will create a new version of that Deploy. Type the following to update the existing Service: l0 service update [existingServiceName] [deployName] By default, the Service you specify in this command will refer to the latest version of [deployName] , if multiple versions of the Deploy exist. Note If you want to refer to a specific version of the Deploy, type the following command instead of the one shown above: l0 service update [serviceName] [deployName]:[deployVersion]", - "title": "Method 1: Refer to a new task definition" - }, - { - "location": "/reference/updateservice/#method-2-create-a-new-deploy-and-service-using-the-same-loadbalancer", - "text": "This method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. 
This method also you allows you to re-scale the service if necessary, using the l0 service scale command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates. The disadvantage of using this method is that you cannot control the routing of traffic between the old and new versions of the application. To create a new Deploy and Service: At the command line, type the following to create a new Deploy (or a new version of the Deploy, if [deployName] already exists): l0 deploy create [pathToTaskDefinition] [deployName] Type the following command to create a new Service that refers to [deployName] behind an existing Loadbalancer named [loadbalancerName] : l0 service create --loadbalancer [loadbalancerName] [environmentName] [deployName] Check to make sure that the new Service is working as expected. If it is, and you do not want to keep the old Service, type the following command to delete the old Service: l0 service delete [oldServiceName]", - "title": "Method 2: Create a new Deploy and Service using the same Loadbalancer" - }, - { - "location": "/reference/updateservice/#method-3-create-a-new-deploy-loadbalancer-and-service", - "text": "The final method of updating a Layer0 service is to create an entirely new Deploy, Loadbalancer and Service. This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services. The disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Loadbalancer. To create a new Deploy, Loadbalancer and Service: At the command line, type the following command to create a new Deploy: l0 deploy create [pathToTaskDefinition] [deployName] Type the following command to create a new Loadbalancer: l0 loadbalancer create --port [portNumber] [environmentName] [loadbalancerName] [deployName] Note The value of [loadbalancerName] in the above command must be unique. Type the following command to create a new Service: l0 service create --loadbalancer [loadBalancerName] [environmentName] [serviceName] [deployName] Note The value of [serviceName] in the above command must be unique. Implement a method of routing traffic between the old and new Services, such as HAProxy or Consul .", - "title": "Method 3: Create a new Deploy, Loadbalancer and Service" - }, - { - "location": "/reference/consul/", - "text": "Consul reference\n#\n\n\nConsul\n is an open-source tool for discovering and configuring services in your network architecture. Specifically, Consul provides the following features:\n\n\n\n\nDiscovery of services\n\n\nMonitoring of the health of services\n\n\nKey/value storage with a simple HTTP API\n\n\n\n\nConsul Agent\n#\n\n\nThe \nConsul Agent\n exposes a DNS API for easy consumption of data generated by \nRegistrator\n. The Consul Agent can run either in server or client mode.\n\n\nWhen run as a Layer0 Service, the Consul Agent runs in server mode. To ensure the integrity of your data, the service in which you run consul should be scaled to size 3 or greater. 
A group of several consul deployments is known as a \"\ncluster\n.\"\n\n\nOther Layer0 Services that use Consul will run the Consul Agent in client mode, alongside their application containers.\nThe client is a very lightweight process that registers services, runs health checks, and forwards queries to servers.\n\n\nRegistrator\n#\n\n\nRegistrator\n is a tool that automatically registers and deregisters services into a Consul Cluster by inspecting Docker containers as they come online.\nContainer registration is based off of environment variables on the container.\nLayer0 Services that use Consul will run Registrator alongside their application containers.\n\n\nService Configuration\n#\n\n\nLayer0 Services that use Consul will need to add the \nRegistrator\n and \nConsul Agent\n definitions to the\n\ncontainerDefinitions\n section of your Deploys. You must also add the \nDocker Socket\n definition to the \nvolumes\n section of your Deploys.\n\n\nFor an example of a Deploy that uses Consul, see the \nGuestbook with Consul\n guide.\n\n\n\n\nRegistrator Container Definition\n#\n\n\n{\n \nname\n: \nregistrator\n,\n \nimage\n: \ngliderlabs/registrator:master\n,\n \nessential\n: true,\n \nlinks\n: [\nconsul-agent\n],\n \nentrypoint\n: [\n/bin/sh\n, \n-c\n],\n \ncommand\n: [\n/bin/registrator -retry-attempts=-1 -retry-interval=30000 -ip $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) consul://consul-agent:8500\n],\n \nmemory\n: 128,\n \nmountPoints\n: [\n {\n \nsourceVolume\n: \ndockersocket\n,\n \ncontainerPath\n: \n/tmp/docker.sock\n\n }\n ]\n},\n\n\n\n\n\n\nConsul Agent Container Definition\n#\n\n\n\n\nWarning\n\n\n\n\nYou must replace \nurl\n with your Layer0 Consul Load Balancer's\n\n\n{\n \nname\n: \nconsul-agent\n,\n \nimage\n: \nprogrium/consul\n,\n \nessential\n: true,\n \nentrypoint\n: [\n/bin/bash\n, \n-c\n],\n \ncommand\n: [\n/bin/start -advertise $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) -retry-join $EXTERNAL_URL -recursor $UPSTREAM_DNS -retry-interval 30s\n],\n \nmemory\n: 128,\n \nportMappings\n: [\n {\n \nhostPort\n: 8500,\n \ncontainerPort\n: 8500\n },\n {\n \nhostPort\n: 53,\n \ncontainerPort\n: 53,\n \nprotocol\n: \nudp\n\n }\n ],\n \nenvironment\n: [\n {\n \nname\n: \nEXTERNAL_URL\n,\n \nvalue\n: \nurl\n\n },\n {\n \nname\n: \nUPSTREAM_DNS\n,\n \nvalue\n: \n10.100.0.2\n\n }\n ]\n},\n\n\n\n\nEnvironment Variables\n#\n\n\n\n\nEXTERNAL_URL\n - URL of the consul cluster\n\n\nUPSTREAM_DNS\n - The DNS server consul-agent queries for DNS entries that it cannot resolve internally (e.g. google.com)\n\n\nThe default value for \nUPSTREAM_DNS\n assumes you're using the default Layer0 configuration, making your internal DNS endpoint 10.100.0.2. If you are a using a non standard configuration (e.g. installing Layer0 in an existing VPC with a CIDR other than \n10.100.0.0/16\n) please modify this variable accordingly.\n\n\n\n\n\n\n\n\n\n\nDocker Socket Volume Definition\n#\n\n\nvolumes\n: [\n {\n \nname\n: \ndockersocket\n,\n \nhost\n: {\n \nsourcePath\n: \n/var/run/docker.sock\n\n }\n }\n],", - "title": "Consul" - }, - { - "location": "/reference/consul/#consul-reference", - "text": "Consul is an open-source tool for discovering and configuring services in your network architecture. 
Specifically, Consul provides the following features: Discovery of services Monitoring of the health of services Key/value storage with a simple HTTP API", - "title": "Consul reference" - }, - { - "location": "/reference/consul/#consul-agent", - "text": "The Consul Agent exposes a DNS API for easy consumption of data generated by Registrator . The Consul Agent can run either in server or client mode. When run as a Layer0 Service, the Consul Agent runs in server mode. To ensure the integrity of your data, the service in which you run consul should be scaled to size 3 or greater. A group of several consul deployments is known as a \" cluster .\" Other Layer0 Services that use Consul will run the Consul Agent in client mode, alongside their application containers.\nThe client is a very lightweight process that registers services, runs health checks, and forwards queries to servers.", - "title": "Consul Agent" - }, - { - "location": "/reference/consul/#registrator", - "text": "Registrator is a tool that automatically registers and deregisters services into a Consul Cluster by inspecting Docker containers as they come online.\nContainer registration is based off of environment variables on the container.\nLayer0 Services that use Consul will run Registrator alongside their application containers.", - "title": "Registrator" - }, - { - "location": "/reference/consul/#service-configuration", - "text": "Layer0 Services that use Consul will need to add the Registrator and Consul Agent definitions to the containerDefinitions section of your Deploys. You must also add the Docker Socket definition to the volumes section of your Deploys. For an example of a Deploy that uses Consul, see the Guestbook with Consul guide.", - "title": "Service Configuration" - }, - { - "location": "/reference/consul/#registrator-container-definition", - "text": "{\n name : registrator ,\n image : gliderlabs/registrator:master ,\n essential : true,\n links : [ consul-agent ],\n entrypoint : [ /bin/sh , -c ],\n command : [ /bin/registrator -retry-attempts=-1 -retry-interval=30000 -ip $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) consul://consul-agent:8500 ],\n memory : 128,\n mountPoints : [\n {\n sourceVolume : dockersocket ,\n containerPath : /tmp/docker.sock \n }\n ]\n},", - "title": "Registrator Container Definition" - }, - { - "location": "/reference/consul/#consul-agent-container-definition", - "text": "Warning You must replace url with your Layer0 Consul Load Balancer's {\n name : consul-agent ,\n image : progrium/consul ,\n essential : true,\n entrypoint : [ /bin/bash , -c ],\n command : [ /bin/start -advertise $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) -retry-join $EXTERNAL_URL -recursor $UPSTREAM_DNS -retry-interval 30s ],\n memory : 128,\n portMappings : [\n {\n hostPort : 8500,\n containerPort : 8500\n },\n {\n hostPort : 53,\n containerPort : 53,\n protocol : udp \n }\n ],\n environment : [\n {\n name : EXTERNAL_URL ,\n value : url \n },\n {\n name : UPSTREAM_DNS ,\n value : 10.100.0.2 \n }\n ]\n},", - "title": "Consul Agent Container Definition" - }, - { - "location": "/reference/consul/#environment-variables", - "text": "EXTERNAL_URL - URL of the consul cluster UPSTREAM_DNS - The DNS server consul-agent queries for DNS entries that it cannot resolve internally (e.g. google.com) The default value for UPSTREAM_DNS assumes you're using the default Layer0 configuration, making your internal DNS endpoint 10.100.0.2. If you are a using a non standard configuration (e.g. 
installing Layer0 in an existing VPC with a CIDR other than 10.100.0.0/16 ) please modify this variable accordingly.", - "title": "Environment Variables" - }, - { - "location": "/reference/consul/#docker-socket-volume-definition", - "text": "volumes : [\n {\n name : dockersocket ,\n host : {\n sourcePath : /var/run/docker.sock \n }\n }\n],", - "title": "Docker Socket Volume Definition" - }, - { - "location": "/reference/task_definition/", - "text": "Task Definitions\n#\n\n\nThis guide gives some overview into the composition of a task definition.\nFor more comprehensive documentation, we recommend taking a look at the official AWS docs:\n\n\n\n\nCreating a Task Definition\n\n\nTask Definition Parameters\n\n\n\n\nSample\n#\n\n\nThe following snippet contains the task definition for the \nGuestbook\n application\n\n\n{\n \nAWSEBDockerrunVersion\n: 2,\n \ncontainerDefinitions\n: [\n {\n \nname\n: \nguestbook\n,\n \nimage\n: \nquintilesims/guestbook\n,\n \nessential\n: true,\n \nmemory\n: 128,\n \nportMappings\n: [\n {\n \nhostPort\n: 80,\n \ncontainerPort\n: 80\n }\n ],\n }\n ]\n}\n\n\n\n\n\n\nName\n The name of the container\n\n\n\n\n\n\nWarning\n\n\n\n\nIf you wish to update your task definition, the container names \nmust\n remain the same.\nIf any container names are changed or removed in an updated task definition,\nECS will not know how the existing container(s) should be mapped over and you will not be able to deploy the updated task definition.\nIf you encounter a scenario where you must change or remove a container's name in a task definition, we recommend re-creating the Layer0 Deploy and Service.\n\n\n\n\nImage\n The Docker image used to build the container. The image format is \nurl/image:tag\n\n\nThe \nurl\n specifies which Docker Repo to pull the image from\n If a non-Docker-Hub \nurl\n is not specified, \nDocker Hub\n is used (as is the case here)\n\n\nThe \nimage\n specifies the name of the image to grab (in this case, the \nguestbook\n image from the \nquintilesims\n Docker Hub group)\n\n\nThe \ntag\n specifies which version of image to grab\nIf \ntag\n is not specified, \n:latest\n is used\n\n\n\n\n\n\nEssential\n If set to \ntrue\n, all other containers in the task definition will be stopped if that container fails or stops for any reason.\nOtherwise, the container's failure will not affect the rest of the containers in the task definition.\n\n\nMemory\n The number of MiB of memory to reserve for the container.\nIf your container attempts to exceed the memory allocated here, the container is killed\n\n\nPortMappings\n A list of hostPort, containerPort mappings for the container\n\n\nHostPort\n The port number on the host instance reserved for your container.\nIf your Layer0 Service is behind a Layer0 Load Balancer, this should map to an \ninstancePort\n on the Layer0 Load Balancer.\n\n\nContainerPort\n The port number the container should receive traffic on.\nAny traffic received from the instance's \nhostPort\n will be forwarded to the container on this port", - "title": "Task Definitions" - }, - { - "location": "/reference/task_definition/#task-definitions", - "text": "This guide gives some overview into the composition of a task definition.\nFor more comprehensive documentation, we recommend taking a look at the official AWS docs: Creating a Task Definition Task Definition Parameters", - "title": "Task Definitions" - }, - { - "location": "/reference/task_definition/#sample", - "text": "The following snippet contains the task definition for the Guestbook application {\n 
AWSEBDockerrunVersion : 2,\n containerDefinitions : [\n {\n name : guestbook ,\n image : quintilesims/guestbook ,\n essential : true,\n memory : 128,\n portMappings : [\n {\n hostPort : 80,\n containerPort : 80\n }\n ],\n }\n ]\n} Name The name of the container Warning If you wish to update your task definition, the container names must remain the same.\nIf any container names are changed or removed in an updated task definition,\nECS will not know how the existing container(s) should be mapped over and you will not be able to deploy the updated task definition.\nIf you encounter a scenario where you must change or remove a container's name in a task definition, we recommend re-creating the Layer0 Deploy and Service. Image The Docker image used to build the container. The image format is url/image:tag The url specifies which Docker Repo to pull the image from\n If a non-Docker-Hub url is not specified, Docker Hub is used (as is the case here) The image specifies the name of the image to grab (in this case, the guestbook image from the quintilesims Docker Hub group) The tag specifies which version of image to grab\nIf tag is not specified, :latest is used Essential If set to true , all other containers in the task definition will be stopped if that container fails or stops for any reason.\nOtherwise, the container's failure will not affect the rest of the containers in the task definition. Memory The number of MiB of memory to reserve for the container.\nIf your container attempts to exceed the memory allocated here, the container is killed PortMappings A list of hostPort, containerPort mappings for the container HostPort The port number on the host instance reserved for your container.\nIf your Layer0 Service is behind a Layer0 Load Balancer, this should map to an instancePort on the Layer0 Load Balancer. ContainerPort The port number the container should receive traffic on.\nAny traffic received from the instance's hostPort will be forwarded to the container on this port", - "title": "Sample" - }, - { - "location": "/reference/architecture/", - "text": "Layer0 Architecture\n#\n\n\nLayer0 is built on top of the following primary technologies:\n\n\n\n\nApplication Container: \nDocker\n\n\nCloud Provider: \nAmazon Web Services\n\n\nContainer Management: \nAmazon EC2 Container Service (ECS)\n\n\nLoad Balancing: \nAmazon Elastic Load Balancing\n\n\nInfrastructure Configuration: Hashicorp \nTerraform\n\n\nIdentity Management: \nAuth0", - "title": "Architecture" - }, - { - "location": "/reference/architecture/#layer0-architecture", - "text": "Layer0 is built on top of the following primary technologies: Application Container: Docker Cloud Provider: Amazon Web Services Container Management: Amazon EC2 Container Service (ECS) Load Balancing: Amazon Elastic Load Balancing Infrastructure Configuration: Hashicorp Terraform Identity Management: Auth0", - "title": "Layer0 Architecture" - }, - { - "location": "/reference/ecr/", - "text": "EC2 Container Registry\n#\n\n\nECR is an Amazon implementation of a docker registry. It acts as a private registry in your AWS account, which can be accessed from any docker client, and Layer0. 
Consider using ECR if you have stability issues with hosted docker registries, and do not wish to share your images publicly on \ndockerhub\n.\n\n\nSetup\n#\n\n\nWhen interacting with ECR, you will first need to create a repository and a login to interact from your development machine.\n\n\nRepository\n#\n\n\nEach repository needs to be created by an AWS api call.\n\n\n \n aws ecr create-repository --repository-name myteam/myproject\n\n\n\n\nLogin\n#\n\n\nTo authenticate with the ECR service, Amazon provides the \nget-login\n command, which generates an authentication token, and returns a docker command to set it up\n\n\n \n aws ecr get-login\n # this command will return the following: (password is typically hundreds of characters)\n docker login -u AWS -p password -e none https://aws_account_id.dkr.ecr.us-east-1.amazonaws.com\n\n\n\n\nExecute the provided docker command to store the login credentials\n\n\nAfterward creating the repository and local login credentials you may interact with images (and tags) under this path from a local docker client.\n\n\n docker pull ${ecr-url}/myteam/myproject\n docker push ${ecr-url}/myteam/myproject:custom-tag-1\n\n\n\n\nDeploy Example\n#\n\n\nHere we'll walk through using ECR when deploying to Layer0, Using a very basic wait container.\n\n\nMake docker image\n#\n\n\nYour docker image can be built locally or pulled from dockerhub. For this example, we made a service that waits and then exits (useful for triggering regular restarts).\n\n\nFROM busybox\n\nENV SLEEP_TIME=60\n\nCMD sleep $SLEEP_TIME\n\n\n\n\nThen build the file, with the tag \nxfra/wait\n\n\n \n docker build -f Dockerfile.wait -t xfra/wait .\n\n\n\n\nUpload to ECR\n#\n\n\nAfter preparing a login and registry, tag the image with the remote url, and use \ndocker push\n\n\n docker tag xfra/wait 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait\n docker push 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait\n\n\n\n\n\n\nNote: your account id in this url will be different.\n\n\n\n\nCreate a deploy\n#\n\n\nTo run this image in Layer0, we create a dockerrun file, describing the instance and any additional variables\n\n\n{\n \ncontainerDefinitions\n: [\n {\n \nname\n: \ntimeout\n,\n \nimage\n: \n111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait:latest\n,\n \nessential\n: true,\n \nmemory\n: 10,\n \nenvironment\n: [\n { \nname\n: \nSLEEP_TIME\n, \nvalue\n: \n43200\n }\n ]\n }\n ]\n}\n\n\n\n\nAnd create that in Layer0\n\n\n l0 deploy create timeout.dockerrun.aws.json timeout\n\n\n\n\nDeploy\n#\n\n\nFinally, run that deploy as a service or a task. (the service will restart every 12 hours)\n\n\n l0 service create demo timeoutsvc timeout:latest\n\n\n\n\nReferences\n#\n\n\n\n\nECR User Guide\n\n\ncreate-repository\n\n\nget-login", - "title": "ECR" - }, - { - "location": "/reference/ecr/#ec2-container-registry", - "text": "ECR is an Amazon implementation of a docker registry. It acts as a private registry in your AWS account, which can be accessed from any docker client, and Layer0. Consider using ECR if you have stability issues with hosted docker registries, and do not wish to share your images publicly on dockerhub .", - "title": "EC2 Container Registry" - }, - { - "location": "/reference/ecr/#setup", - "text": "When interacting with ECR, you will first need to create a repository and a login to interact from your development machine.", - "title": "Setup" - }, - { - "location": "/reference/ecr/#repository", - "text": "Each repository needs to be created by an AWS api call. 
aws ecr create-repository --repository-name myteam/myproject", - "title": "Repository" - }, - { - "location": "/reference/ecr/#login", - "text": "To authenticate with the ECR service, Amazon provides the get-login command, which generates an authentication token, and returns a docker command to set it up aws ecr get-login\n # this command will return the following: (password is typically hundreds of characters)\n docker login -u AWS -p password -e none https://aws_account_id.dkr.ecr.us-east-1.amazonaws.com Execute the provided docker command to store the login credentials Afterward creating the repository and local login credentials you may interact with images (and tags) under this path from a local docker client. docker pull ${ecr-url}/myteam/myproject\n docker push ${ecr-url}/myteam/myproject:custom-tag-1", - "title": "Login" - }, - { - "location": "/reference/ecr/#deploy-example", - "text": "Here we'll walk through using ECR when deploying to Layer0, Using a very basic wait container.", - "title": "Deploy Example" - }, - { - "location": "/reference/ecr/#make-docker-image", - "text": "Your docker image can be built locally or pulled from dockerhub. For this example, we made a service that waits and then exits (useful for triggering regular restarts). FROM busybox\n\nENV SLEEP_TIME=60\n\nCMD sleep $SLEEP_TIME Then build the file, with the tag xfra/wait docker build -f Dockerfile.wait -t xfra/wait .", - "title": "Make docker image" - }, - { - "location": "/reference/ecr/#upload-to-ecr", - "text": "After preparing a login and registry, tag the image with the remote url, and use docker push docker tag xfra/wait 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait\n docker push 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait Note: your account id in this url will be different.", - "title": "Upload to ECR" - }, - { - "location": "/reference/ecr/#create-a-deploy", - "text": "To run this image in Layer0, we create a dockerrun file, describing the instance and any additional variables {\n containerDefinitions : [\n {\n name : timeout ,\n image : 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait:latest ,\n essential : true,\n memory : 10,\n environment : [\n { name : SLEEP_TIME , value : 43200 }\n ]\n }\n ]\n} And create that in Layer0 l0 deploy create timeout.dockerrun.aws.json timeout", - "title": "Create a deploy" - }, - { - "location": "/reference/ecr/#deploy", - "text": "Finally, run that deploy as a service or a task. (the service will restart every 12 hours) l0 service create demo timeoutsvc timeout:latest", - "title": "Deploy" - }, - { - "location": "/reference/ecr/#references", - "text": "ECR User Guide create-repository get-login", - "title": "References" - }, - { - "location": "/troubleshooting/commonissues/", - "text": "Common issues and their solutions\n#\n\n\n\"Connection refused\" error when executing Layer0 commands\n#\n\n\nWhen executing commands using the Layer0 CLI, you may see the following error message: \"Get http://localhost:9090/\ncommand\n/: dial tcp 127.0.0.1:9090: connection refused\", where \ncommand\n is the Layer0 command you are trying to execute.\n\n\nThis error indicates that your Layer0 environment variables have not been set for the current session. 
See the \n\"Configure environment variables\" section\n of the Layer0 installation guide for instructions for setting up your environment variables.\n\n\n\n\n\"Invalid Dockerrun.aws.json\" error when creating a deploy\n#\n\n\nByte Order Marks (BOM) in Dockerrun file\n#\n\n\nIf your Dockerrun.aws.json file contains a Byte Order Marker, you may receive an \"Invalid Dockerrun.aws.json\" error when creating a deploy. If you create or edit the Dockerrun file using Visual Studio, and you have not modified the file encoding settings in Visual Studio, you are likely to encounter this error.\n\n\nTo remove the BOM:\n\n\n\n\n\n\nAt the command line, type the following to remove the BOM:\n\n\n\n\n(Linux/OS X) \ntail -c +4\n \nDockerrunFile\n \n \nDockerrunFileNew\n\n\nReplace \nDockerrunFile\n with the path to your Dockerrun file, and \nDockerrunFileNew\n with a new name for the Dockerrun file without the BOM.\n\n\n\n\n\n\n\n\nAlternatively, you can use the \ndos2unix file converter\n to remove the BOM from your Dockerrun files. Dos2unix is available for Windows, Linux and Mac OS.\n\n\nTo remove the BOM using dos2unix:\n\n\n\n\n\n\nAt the command line, type the following:\n\n\n\n\ndos2unix --remove-bom -n\n \nDockerrunFile\n \nDockerrunFileNew\n\n\nReplace \nDockerrunFile\n with the path to your Dockerrun file, and \nDockerrunFileNew\n with a new name for the Dockerrun file without the BOM.\n\n\n\n\n\n\n\n\n\n\n\"AWS Error: the key pair '\n' does not exist (code 'ValidationError')\" with l0-setup\n#\n\n\nThis occurs when you pass a non-existent EC2 keypair to l0-setup. To fix this, follow the instructions for \ncreating an EC2 Key Pair\n.\n\n\n\n\nAfter you've created a new EC2 Key Pair, run the following command:\n\n\n \nl0-setup plan\n \nprefix\n \n-var key_pair\n=\nkeypair", - "title": "Common Issues" - }, - { - "location": "/troubleshooting/commonissues/#common-issues-and-their-solutions", - "text": "", - "title": "Common issues and their solutions" - }, - { - "location": "/troubleshooting/commonissues/#connection-refused-error-when-executing-layer0-commands", - "text": "When executing commands using the Layer0 CLI, you may see the following error message: \"Get http://localhost:9090/ command /: dial tcp 127.0.0.1:9090: connection refused\", where command is the Layer0 command you are trying to execute. This error indicates that your Layer0 environment variables have not been set for the current session. See the \"Configure environment variables\" section of the Layer0 installation guide for instructions for setting up your environment variables.", - "title": "\"Connection refused\" error when executing Layer0 commands" - }, - { - "location": "/troubleshooting/commonissues/#invalid-dockerrunawsjson-error-when-creating-a-deploy", - "text": "", - "title": "\"Invalid Dockerrun.aws.json\" error when creating a deploy" - }, - { - "location": "/troubleshooting/commonissues/#byte-order-marks-bom-in-dockerrun-file", - "text": "If your Dockerrun.aws.json file contains a Byte Order Marker, you may receive an \"Invalid Dockerrun.aws.json\" error when creating a deploy. If you create or edit the Dockerrun file using Visual Studio, and you have not modified the file encoding settings in Visual Studio, you are likely to encounter this error. To remove the BOM: At the command line, type the following to remove the BOM: (Linux/OS X) tail -c +4 DockerrunFile DockerrunFileNew Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM. 
Alternatively, you can use the dos2unix file converter to remove the BOM from your Dockerrun files. Dos2unix is available for Windows, Linux and Mac OS. To remove the BOM using dos2unix: At the command line, type the following: dos2unix --remove-bom -n DockerrunFile DockerrunFileNew Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.", - "title": "Byte Order Marks (BOM) in Dockerrun file" - }, - { - "location": "/troubleshooting/commonissues/#aws-error-the-key-pair-does-not-exist-code-validationerror-with-l0-setup", - "text": "This occurs when you pass a non-existent EC2 keypair to l0-setup. To fix this, follow the instructions for creating an EC2 Key Pair . After you've created a new EC2 Key Pair, run the following command: \n l0-setup plan prefix -var key_pair = keypair", - "title": "\"AWS Error: the key pair '' does not exist (code 'ValidationError')\" with l0-setup" - }, - { - "location": "/troubleshooting/ssh/", - "text": "Secure Shell (SSH)\n#\n\n\nYou can use Secure Shell (SSH) to access your Layer0 environment(s).\n\n\nBy default, Layer0 Setup asks for an EC2 key pair when creating a new Layer0. This key pair is associated with all machines that host your Layer0 Services. This means you can use SSH to log into the underlying Docker host to perform tasks such as troubleshooting failing containers or viewing logs. For information about creating an EC2 key pair, see \nInstall and Configure Layer0\n.\n\n\n\n\nWarning\n\n\nThis section is recommended for development debugging only.\nIt is \nnot\n recommended for production environments.\n\n\n\n\nTo SSH into a Service\n#\n\n\n\n\nIn a console window, add port 2222:22/tcp to your Service's load balancer:\n\n\n\n\nl0 loadbalancer addport \nname\n 2222:22/tcp\n\n\n\n\n\n \nSSH into your Service by supplying the load balancer url and key pair file name.\n\n\n\n\n\nssh -i \nkey pair path and file name\n ec2-user@\nload balancer url\n -p 2222\n\n\n\n\n\n \nIf required, Use Docker to access a specific container with Bash.\n\n\n\n\n\ndocker exec -it \ncontainer id\n /bin/bash\n\n\n\n\nRemarks\n#\n\n\nYou can get the load balancer url from the Load Balancers section of your Layer0 AWS console.\n\n\nUse the \nloadbalancer dropport\n subcommand to remove a port configuration from an existing Layer0 load balancer.\n\n\nYou \ncannot\n change the key pair after a Layer0 has been created. If you lose your key pair or need to generate a new one, you will need to create a new Layer0.\n\n\nIf your Service is behind a private load balancer, or none at all, you can either re-create your Service behind a public load balancer, use an existing public load balancer as a \"jump\" point, or create a new Layer0 Service behind a public load balancer to serve as a \"jump\" point.", - "title": "Secure Shell (SSH)" - }, - { - "location": "/troubleshooting/ssh/#secure-shell-ssh", - "text": "You can use Secure Shell (SSH) to access your Layer0 environment(s). By default, Layer0 Setup asks for an EC2 key pair when creating a new Layer0. This key pair is associated with all machines that host your Layer0 Services. This means you can use SSH to log into the underlying Docker host to perform tasks such as troubleshooting failing containers or viewing logs. For information about creating an EC2 key pair, see Install and Configure Layer0 . 
Warning This section is recommended for development debugging only.\nIt is not recommended for production environments.", - "title": "Secure Shell (SSH)" - }, - { - "location": "/troubleshooting/ssh/#to-ssh-into-a-service", - "text": "In a console window, add port 2222:22/tcp to your Service's load balancer: l0 loadbalancer addport name 2222:22/tcp \n SSH into your Service by supplying the load balancer url and key pair file name. ssh -i key pair path and file name ec2-user@ load balancer url -p 2222 \n If required, Use Docker to access a specific container with Bash. docker exec -it container id /bin/bash", - "title": "To SSH into a Service" - }, - { - "location": "/troubleshooting/ssh/#remarks", - "text": "You can get the load balancer url from the Load Balancers section of your Layer0 AWS console. Use the loadbalancer dropport subcommand to remove a port configuration from an existing Layer0 load balancer. You cannot change the key pair after a Layer0 has been created. If you lose your key pair or need to generate a new one, you will need to create a new Layer0. If your Service is behind a private load balancer, or none at all, you can either re-create your Service behind a public load balancer, use an existing public load balancer as a \"jump\" point, or create a new Layer0 Service behind a public load balancer to serve as a \"jump\" point.", - "title": "Remarks" - } - ] -} \ No newline at end of file diff --git a/docs/reference/architecture/index.html b/docs/reference/architecture/index.html index 3b6206b87..bef883a9c 100644 --- a/docs/reference/architecture/index.html +++ b/docs/reference/architecture/index.html @@ -1,16 +1,16 @@ + + + + - - - - - + - - + + + - Architecture - Layer0 - + @@ -18,427 +18,589 @@ + + + + + + + + + + + + + + + + + + - - - - - - + + Architecture - Layer0 + - + + + + - - - - - + + + - - - + - - - - - - - - -
    +
    +
    + + -
    -
    -
    +
    +
    +
    + -

    Layer0 Architecture#

    +
    -
    + + + + +
\ No newline at end of file
diff --git a/docs/reference/cli/index.html b/docs/reference/cli/index.html
index 3dc482956..60b735de2 100644
--- a/docs/reference/cli/index.html
+++ b/docs/reference/cli/index.html
@@ -1,16 +1,16 @@

Layer0 CLI - Layer0
    -
    -
    - - - -
    -
    + +
    + + + +
    +
    + + +
    +
    +
    + - - -
  • + +
  • + + task list + + + + +
  • + + + + + + + + + + -
    -
    -
    +
    +
    + + -

    Layer0 CLI Reference#

    +
    +
    + + + + + +

    Layer0 CLI Reference#

    Global options#

    -

    The l0 application is designed to be used with one of several subcommands: admin, deploy, environment, job, loadbalancer, service, and task. These subcommands are detailed in the sections below. There are, however, some global parameters that you may specify when using l0.

    +

    The l0 application is designed to be used with one of several commands: admin, deploy, environment, job, loadbalancer, service, and task. These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using l0.

    Usage#

    -

    -
    -
    l0 [globalOptions] command subcommand [options] [parameters]
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --output {text|json}
    -
    Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the --output json option, you can force l0 to output JSON-formatted text.
    -
    -
    -
    --version
    -
    Display the version number of the l0 application.
    -
    -

    +
    l0 [global options] command subcommand [subcommand options] params
    +
    + + +

    Global options#

    +
      +
    • -o [text|json], --output [text|json] - Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the --output json option, you can force l0 to output JSON-formatted text.
    • +
    • -t value, --timeout value - Specify the timeout for running l0 commands. Values can be in h, m, s, or ms.
    • +
    • -d, --debug - Print debug statements
    • +
    • -v, --version - Display the version number of the l0 application.
    • +
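For example, to list environments with JSON output and a five-minute timeout (the command below is only an illustration of combining global options with a subcommand):
    l0 --output json --timeout 5m environment list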

    Admin#

    -

    The admin command is used to manage the Layer0 API server. This command is used with the following subcommands: debug, sql, and version.

    +

    The admin command is used to manage the Layer0 API server. This command is used with the following subcommands: debug, sql, and version.

    admin debug#

    -

    Use the debug subcommand to view the running version of your Layer0 API server and CLI.

    +

    Use the debug subcommand to view the running version of your Layer0 API server and CLI.

    Usage#

    -

    -
    -
    l0 admin debug
    -
    -

    +
    l0 admin debug
    +
    + +

    admin sql#

    -

    Use the sql subcommand to initialize the Layer0 API database.

    +

    Use the sql subcommand to initialize the Layer0 API database.

    Usage#

    -

    -
    -
    l0 admin sql
    -
    -

    +
    l0 admin sql
    +
    + +

    Additional information#

    -

    -
    -
    The sql subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.
    -
    -

    +

    The sql subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.

    admin version#

    -

    Use the version subcommand to display the current version of the Layer0 API.

    +

    Use the version subcommand to display the current version of the Layer0 API.

    Usage#

    -

    -
    -
    l0 admin version
    -
    -

    +
    l0 admin version 
    +
    + +

    Deploy#

    +

Deploys are ECS Task Definitions. They are configuration files that detail how to deploy your application.
+The deploy command is used to manage Layer0 deploys. This command is used with the following subcommands: create, delete, get, and list.

    deploy create#

    -

    Use the create subcommand to upload a Docker task definition into Layer0. This command is used with the following subcommands: create, delete, get and list.

    +

    Use the create subcommand to upload a Docker task definition into Layer0.

    Usage#

    -

    -
    -
    l0 deploy create dockerPath deployName
    -
    -

    +
    l0 deploy create taskDefPath deployName
    +
    + +

    Required parameters#

    -

    -
    -
    dockerPath
    -
    The path to the Docker task definition that you want to upload.
    -
    -
    -
    deployName
    -
    A name for the deploy.
    -
    -

    +
      +
    • taskDefPath - The path to the Docker task definition that you want to upload.
    • +
    • deployName - A name for the deploy.
    • +
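For example, assuming a task definition file at ./Dockerrun.aws.json and a deploy named guestbook (both names are illustrative):
    l0 deploy create ./Dockerrun.aws.json guestbook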

    Additional information#

    -

    -
    -
    If deployName exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version.
    -

    -
    -
    If you use Visual Studio to modify or create your Dockerrun file, you may see an "Invalid Dockerrun.aws.json" error. This error is caused by the default encoding used by Visual Studio. See the "Common issues" page for steps to resolve this issue.
    -

    -
    -
    -Deploys created through Layer0 are rendered with a logConfiguration section for each container. +

    If deployName exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version.

    +

    If you use Visual Studio to modify or create your Dockerrun file, you may see an "Invalid Dockerrun.aws.json" error. This error is caused by the default encoding used by Visual Studio. See the "Common issues" page for steps to resolve this issue.

    +

Deploys created through Layer0 are rendered with a logConfiguration section for each container.
If a logConfiguration section already exists, no changes are made to the section.
The additional section enables logs from each container to be sent to the Layer0 log group. This is where logs are looked up during l0 <entity> logs commands. The added logConfiguration section uses the following template:

    -
    "logConfiguration": {
    +
    "logConfiguration": {
         "logDriver": "awslogs",
             "options": {
                 "awslogs-group": "l0-<prefix>",
    @@ -587,174 +3240,145 @@ 

    Additional information

    +} +
    -

    -
    -

    deploy delete#

    -

    Use the delete subcommand to delete a version of a Layer0 deploy.

    +

    Use the delete subcommand to delete a version of a Layer0 deploy.

    Usage#

    -

    -
    -
    l0 deploy delete deployID
    -
    -

    +
    l0 deploy delete deployName
    +
    + +

    Required parameters#

    -

    -
    -
    deployID
    -
    The unique identifier of the version of the deploy that you want to delete. You can obtain a list of deployIDs for a given deploy by executing the following command: l0 deploy get deployName
    -
    -

    +
      +
    • deployName - The name of the Layer0 deploy you want to delete.
    • +

    deploy get#

    -

    Use the get subcommand to view information about an existing Layer0 deploy.

    +

    Use the get subcommand to view information about an existing Layer0 deploy.

    Usage#

    -

    -
    -
    l0 deploy get deployName
    -
    -

    +
    l0 deploy get deployName
    +
    + +

    Required parameters#

    -

    -
    -
    deployName
    -
    The name of the Layer0 deploy for which you want to view additional information.
    -
    -

    +
      +
    • deployName - The name of the Layer0 deploy for which you want to view additional information.
    • +

    Additional information#

    -

    -
    -
    The get subcommand supports wildcard matching: l0 deploy get dep* would return all deploys beginning with dep.
    -
    -

    +

    The get subcommand supports wildcard matching: l0 deploy get dep* would return all deploys beginning with dep.

    deploy list#

    -

    Use the list subcommand to view a list of deploys in your instance of Layer0.

    +

    Use the list subcommand to view a list of deploys in your instance of Layer0.

    Usage#

    -

    -
    -
    l0 deploy list
    -
    -

    +
    l0 deploy list
    +
    + +

    Environment#

    Layer0 environments allow you to isolate services and load balancers for specific applications. -The environment command is used to manage Layer0 environments. This command is used with the following subcommands: create, delete, get, list, and setmincount.

    +The environment command is used to manage Layer0 environments. This command is used with the following subcommands: create, delete, get, list, and setmincount.

    environment create#

    -

    Use the create subcommand to create an additional Layer0 environment (environmentName).

    +

    Use the create subcommand to create a new Layer0 environment.

    Usage#

    -

    -
    -
    l0 environment create [--size] [--min-count] [--user-data] [--os] [--ami] environmentName
    -
    -

    +
    l0 environment create [--size size | --min-count mincount | 
    +    --user-data path | --os os | --ami amiID] environmentName
    +
    + +

    Required parameters#

    -

    -
    -
    environmentName
    -
    A name for the environment.
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --size
    -
    The size of the EC2 instances to create in your environment (default: m3.medium).
    -
    -
    -
    --min-count
    -
    The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0).
    -
    -
    -
    --user-data
    -
    The user data template to use for the environment's autoscaling group.
    -
    -
    -
    --os
    -
    The operating system used in the environment. Options are "linux" or "windows" (default: linux). - More information on windows environments is documented below
    -
    -
    -
    --ami
    -
    A custom AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system.
    -
    -

    -

    The user data template can be used to add custom configuration to your Layer0 environment. +

      +
    • environmentName - A name for the environment.
    • +
    +

    Optional arguments#

    +
      +
    • --size size - The instance size of the EC2 instances to create in your environment (default: m3.medium).
    • +
    • --min-count mincount - The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0).
    • +
    • --user-data path - The user data template file to use for the environment's autoscaling group.
    • +
    • --os os - The operating system used in the environment. Options are "linux" or "windows" (default: linux). More information on windows environments is documented below.
    • +
• --ami amiID - A custom EC2 AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system.
    • +
    +
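For example, to create an environment named demo that keeps at least two t2.small instances running (the environment name and instance size are illustrative):
    l0 environment create --size t2.small --min-count 2 demo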

The user data template can be used to add custom configuration to your Layer0 environment.
User data templates are usually scripts that are executed at instance launch time to ensure an EC2 instance is in the correct state after the provisioning process finishes.
Layer0 uses Go Templates to render user data.
-Currently, two variables are passed into the template: ECSEnvironmentID and S3Bucket.
-Please review the ECS Tutorial
+Currently, two variables are passed into the template: ECSEnvironmentID and S3Bucket.

    +
    +

    Danger

    +

    Please review the ECS Tutorial to better understand how to write a user data template, and use at your own risk!

    +

    Linux Environments: The default Layer0 user data template is:

    -
    #!/bin/bash
    -echo ECS_CLUSTER={{ .ECSEnvironmentID }} >> /etc/ecs/ecs.config
    -echo ECS_ENGINE_AUTH_TYPE=dockercfg >> /etc/ecs/ecs.config
    +
    #!/bin/bash
    +echo ECS_CLUSTER={{ .ECSEnvironmentID }} >> /etc/ecs/ecs.config
    +echo ECS_ENGINE_AUTH_TYPE=dockercfg >> /etc/ecs/ecs.config
     yum install -y aws-cli awslogs jq
    -aws s3 cp s3://{{ .S3Bucket }}/bootstrap/dockercfg dockercfg
    -cfg=$(cat dockercfg)
    -echo ECS_ENGINE_AUTH_DATA=$cfg >> /etc/ecs/ecs.config
    +aws s3 cp s3://{{ .S3Bucket }}/bootstrap/dockercfg dockercfg
    +cfg=$(cat dockercfg)
    +echo ECS_ENGINE_AUTH_DATA=$cfg >> /etc/ecs/ecs.config
     docker pull amazon/amazon-ecs-agent:latest
    -start ecs
    +start ecs +

    Windows Environments: The default Layer0 user data template is:

    -
    <powershell>
    -# Set agent env variables for the Machine context (durable)
    -$clusterName = "{{ .ECSEnvironmentID }}"
    -Write-Host Cluster name set as: $clusterName -foreground green
    -
    -[Environment]::SetEnvironmentVariable("ECS_CLUSTER", $clusterName, "Machine")
    -[Environment]::SetEnvironmentVariable("ECS_ENABLE_TASK_IAM_ROLE", "false", "Machine")
    -$agentVersion = 'v1.14.0-1.windows.1'
    -$agentZipUri = "https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip"
    -$agentZipMD5Uri = "$agentZipUri.md5"
    -
    -# Configure docker auth
    -Read-S3Object -BucketName {{ .S3Bucket }} -Key bootstrap/dockercfg -File dockercfg.json
    -$dockercfgContent = [IO.File]::ReadAllText("dockercfg.json")
    -[Environment]::SetEnvironmentVariable("ECS_ENGINE_AUTH_DATA", $dockercfgContent, "Machine")
    -[Environment]::SetEnvironmentVariable("ECS_ENGINE_AUTH_TYPE", "dockercfg", "Machine")
    -
    -### --- Nothing user configurable after this point ---
    -$ecsExeDir = "$env:ProgramFiles\Amazon\ECS"
    -$zipFile = "$env:TEMP\ecs-agent.zip"
    -$md5File = "$env:TEMP\ecs-agent.zip.md5"
    -
    -### Get the files from S3
    -Invoke-RestMethod -OutFile $zipFile -Uri $agentZipUri
    -Invoke-RestMethod -OutFile $md5File -Uri $agentZipMD5Uri
    -
    -## MD5 Checksum
    -$expectedMD5 = (Get-Content $md5File)
    -$md5 = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider
    -$actualMD5 = [System.BitConverter]::ToString($md5.ComputeHash([System.IO.File]::ReadAllBytes($zipFile))).replace('-', '')
    -if($expectedMD5 -ne $actualMD5) {
    -    echo "Download doesn't match hash."
    -    echo "Expected: $expectedMD5 - Got: $actualMD5"
    -    exit 1
    -}
    +
    <powershell>
    +# Set agent env variables for the Machine context (durable)
    +$clusterName = "{{ .ECSEnvironmentID }}"
    +Write-Host Cluster name set as: $clusterName -foreground green
     
    -## Put the executables in the executable directory.
    -Expand-Archive -Path $zipFile -DestinationPath $ecsExeDir -Force
    +[Environment]::SetEnvironmentVariable("ECS_CLUSTER", $clusterName, "Machine")
    +[Environment]::SetEnvironmentVariable("ECS_ENABLE_TASK_IAM_ROLE", "false", "Machine")
    +$agentVersion = 'v1.5.2'
    +$agentZipUri = "https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip"
    +$agentZipMD5Uri = "$agentZipUri.md5"
     
    -## Start the agent script in the background.
    -$jobname = "ECS-Agent-Init"
    -$script =  "cd '$ecsExeDir'; .\amazon-ecs-agent.ps1"
    -$repeat = (New-TimeSpan -Minutes 1)
    -$jobpath = $env:LOCALAPPDATA + "\Microsoft\Windows\PowerShell\ScheduledJobs\$jobname\ScheduledJobDefinition.xml"
    +# Configure docker auth
    +Read-S3Object -BucketName {{ .S3Bucket }} -Key bootstrap/dockercfg -File dockercfg.json
    +$dockercfgContent = [IO.File]::ReadAllText("dockercfg.json")
    +[Environment]::SetEnvironmentVariable("ECS_ENGINE_AUTH_DATA", $dockercfgContent, "Machine")
    +[Environment]::SetEnvironmentVariable("ECS_ENGINE_AUTH_TYPE", "dockercfg", "Machine")
     
    -if($(Test-Path -Path $jobpath)) {
    -  echo "Job definition already present"
    -  exit 0
    -}
    +### --- Nothing user configurable after this point ---
    +$ecsExeDir = "$env:ProgramFiles\Amazon\ECS"
    +$zipFile = "$env:TEMP\ecs-agent.zip"
    +$md5File = "$env:TEMP\ecs-agent.zip.md5"
    +
    +### Get the files from S3
    +Invoke-RestMethod -OutFile $zipFile -Uri $agentZipUri
    +Invoke-RestMethod -OutFile $md5File -Uri $agentZipMD5Uri
    +
    +## MD5 Checksum
    +$expectedMD5 = (Get-Content $md5File)
    +$md5 = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider
    +$actualMD5 = [System.BitConverter]::ToString($md5.ComputeHash([System.IO.File]::ReadAllBytes($zipFile))).replace('-', '')
    +if($expectedMD5 -ne $actualMD5) {
    +    echo "Download doesn't match hash."
    +    echo "Expected: $expectedMD5 - Got: $actualMD5"
    +    exit 1
    +}
     
    -$scriptblock = [scriptblock]::Create("$script")
    -$trigger = New-JobTrigger -At (Get-Date).Date -RepeatIndefinitely -RepetitionInterval $repeat -Once
    -$options = New-ScheduledJobOption -RunElevated -ContinueIfGoingOnBattery -StartIfOnBattery
    -Register-ScheduledJob -Name $jobname -ScriptBlock $scriptblock -Trigger $trigger -ScheduledJobOption $options -RunNow
    -Add-JobTrigger -Name $jobname -Trigger (New-JobTrigger -AtStartup -RandomDelay 00:1:00)
    -</powershell>
    -<persist>true</persist>
+## Put the executables in the executable directory.
+Expand-Archive -Path $zipFile -DestinationPath $ecsExeDir -Force
+
+## Start the agent script in the background.
+$jobname = "ECS-Agent-Init"
+$script =  "cd '$ecsExeDir'; .\amazon-ecs-agent.ps1"
+$repeat = (New-TimeSpan -Minutes 1)
+$jobpath = $env:LOCALAPPDATA + "\Microsoft\Windows\PowerShell\ScheduledJobs\$jobname\ScheduledJobDefinition.xml"
+
+if($(Test-Path -Path $jobpath)) {
+  echo "Job definition already present"
+  exit 0
+}
+
+$scriptblock = [scriptblock]::Create("$script")
+$trigger = New-JobTrigger -At (Get-Date).Date -RepeatIndefinitely -RepetitionInterval $repeat -Once
+$options = New-ScheduledJobOption -RunElevated -ContinueIfGoingOnBattery -StartIfOnBattery
+Register-ScheduledJob -Name $jobname -ScriptBlock $scriptblock -Trigger $trigger -ScheduledJobOption $options -RunNow
+Add-JobTrigger -Name $jobname -Trigger (New-JobTrigger -AtStartup -RandomDelay 00:1:00)
+</powershell>
+<persist>true</persist>
+
    @@ -765,900 +3389,541 @@

    Optional argumentsenvironment delete#

    -

    Use the delete subcommand to delete an existing Layer0 environment.

    +

    Use the delete subcommand to delete an existing Layer0 environment.

    Usage#

    -

    -
    -
    l0 environment delete [--wait] environmentName
    -
    -

    +
    l0 environment delete [--wait] environmentName
    +
    + +

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the Layer0 environment that you want to delete.
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --wait
    -
    Wait until the deletion is complete before exiting.
    -
    -

    +
      +
    • environmentName - The name of the Layer0 environment that you want to delete.
    • +
    +

    Optional arguments#

    +
      +
    • --wait - Wait until the deletion is complete before exiting.
    • +

    Additional information#

    -

    -
    -
    This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.
    -
    -

    +

    This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.

    environment get#

    -

    Use the get subcommand to display information about an existing Layer0 environment.

    +

    Use the get subcommand to display information about an existing Layer0 environment.

    Usage#

    -

    -
    -
    l0 environment get environmentName
    -
    -

    +
    l0 environment get environmentName
    +
    + +

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the Layer0 environment for which you want to view additional information.
    -
    -

    +
      +
    • environmentName - The name of the Layer0 environment for which you want to view additional information.
    • +

    Additional information#

    -

    -
    -
    The get subcommand supports wildcard matching: l0 environment get test* would return all environments beginning with test.
    -
    -

    +

    The get subcommand supports wildcard matching: l0 environment get test* would return all environments beginning with test.

    environment list#

    -

    Use the list subcommand to display a list of environments in your instance of Layer0.

    +

    Use the list subcommand to display a list of environments in your instance of Layer0.

    Usage#

    -

    -
    -
    l0 environment list
    -
    -

    +
    l0 environment list
    +
    + +

    environment setmincount#

    -

    Use the setmincount subcommand to set the minimum number of EC2 instances allowed the environment's autoscaling group.

    +

Use the setmincount subcommand to set the minimum number of EC2 instances allowed in the environment's autoscaling group.

    Usage#

    -

    -
    -
    l0 enviroment setmincount environmentName count
    -
    -

    +
    l0 environment setmincount environmentName count
    +
    + +

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the Layer0 environment that you want to delete.
    -
    -
    -
    count
    -
    The minimum number of instances allowed in the environment's autoscaling group.
    -
    -

    +
      +
    • environmentName - The name of the Layer0 environment that you want to adjust.
    • +
    • count - The minimum number of instances allowed in the environment's autoscaling group.
    • +
    -

    Use the link subcommand to link two environments together. +

    Use the link subcommand to link two environments together. When environments are linked, services inside the environments are allowed to communicate with each other as if they were in the same environment. This link is bidirectional. This command is idempotent; it will succeed even if the two specified environments are already linked.

    Usage#

    -

    -
    -
    l0 environment link sourceEnvironmentName destEnvironmentName
    -
    -

    +
    l0 environment link sourceEnvironmentName destEnvironmentName
    +
    + +

    Required parameters#

    -

    -
    -
    sourceEnvironmentName
    -
    The name of the first environment to link.
    -
    -
    -
    destEnvironmentName
    -
    The name of the second environment to link.
    -
    -

    +
      +
    • sourceEnvironmentName - The name of the source environment to link.
    • +
    • destEnvironmentName - The name of the destination environment to link.
    • +
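For example, assuming two existing environments named frontend and backend (illustrative names), the following links them so their services can communicate:
    l0 environment link frontend backend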
    -

    Use the unlink subcommand to remove the link between two environments. +

    Use the unlink subcommand to remove the link between two environments. This command is idempotent; it will succeed even if the link does not exist.

    Usage#

    -

    -
    -
    l0 environment unlink sourceEnvironmentName destEnvironmentName
    -
    -

    +
    l0 environment unlink sourceEnvironmentName destEnvironmentName
    +
    + +

    Required parameters#

    -

    -
    -
    sourceEnvironmentName
    -
    The name of the first environment to unlink.
    -
    -
    -
    destEnvironmentName
    -
    The name of the second environment to unlink.
    -
    -

    +
      +
    • sourceEnvironmentName - The name of the source environment to unlink.
    • +
    • destEnvironmentName - The name of the destination environment to unlink.
    • +

    Job#

    A Job is a long-running unit of work performed on behalf of the Layer0 API. -Jobs are executed as Layer0 tasks that run in the api Environment. -The job command is used with the following subcommands: logs, delete, get, and list.

    +Jobs are executed as Layer0 tasks that run in the api environment. +The job command is used with the following subcommands: logs, delete, get, and list.

    job logs#

    -

    Use the logs subcommand to display the logs from a Layer0 job that is currently running.

    +

    Use the logs subcommand to display the logs from a Layer0 job that is currently running.

    Usage#

    -

    -
    -
    l0 job logs [--start MM/DD HH:MM] [--end MM/DD HH:MM] [--tail=N ] jobName
    -
    -

    +
    l0 job logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] jobName
    +
    + +

    Required parameters#

    -

    -
    -
    jobName
    -
    The name of the Layer0 job for which you want to view logs.
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --start MM/DD HH:MM
    -
    The start of the time range to fetch logs.
    -
    -
    -
    --end MM/DD HH:MM
    -
    The end of the time range to fetch logs.
    -
    -
    -
    --tail=N
    -
    Display only the last N lines of the log.
    -
    -

    +
      +
    • jobName - The name of the Layer0 job for which you want to view logs.
    • +
    +

    Optional arguments#

    +
      +
    • --start MM/DD HH:MM - The start of the time range to fetch logs.
    • +
    • --end MM/DD HH:MM - The end of the time range to fetch logs.
    • +
    • --tail=N - Display only the last N lines of the log.
    • +
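For example, to show only the last 100 log lines for a job (the job name below is illustrative):
    l0 job logs --tail=100 2a55b1f0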

    job delete#

    -

    Use the delete subcommand to delete an existing job.

    +

    Use the delete subcommand to delete an existing job.

    Usage#

    -

    -
    -
    l0 job delete jobName
    -
    -

    +
    l0 job delete jobName
    +
    + +

    Required parameters#

    -

    -
    -
    jobName
    -
    The name of the job that you want to delete.
    -
    -

    +
      +
    • jobName - The name of the job that you want to delete.
    • +

    job get#

    -

    Use the get subcommand to display information about an existing Layer0 job.

    +

    Use the get subcommand to display information about an existing Layer0 job.

    Usage#

    -

    -
    -
    l0 job get jobName
    -
    -

    +
    l0 job get jobName
    +
    + +

    Required parameters#

    -

    -
    -
    jobName
    -
    The name of an existing Layer0 job.
    -
    -

    +
      +
    • jobName - The name of an existing Layer0 job to display.
    • +

    Additional information#

    -

    -
    -
    The get subcommand supports wildcard matching: l0 job get 2a55* would return all jobs beginning with 2a55.
    -
    -

    +

    The get subcommand supports wildcard matching: l0 job get 2a55* would return all jobs beginning with 2a55.

    job list#

    -

    Use the list subcommand to display information about all of the existing jobs in an instance of Layer0.

    +

    Use the list subcommand to display information about all of the existing jobs in an instance of Layer0.

    Usage#

    -

    -
    -
    l0 job list
    -
    -

    +
    l0 job list
    +
    + +
    -

    Loadbalancer#

    -

    A load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 services. The loadbalancer command is used with the following subcommands: create, delete, addport, dropport, get, list, and healthcheck.

    +

    Load Balancer#

    +

    A load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 services. The loadbalancer command is used with the following subcommands: create, delete, addport, dropport, get, list, and healthcheck.

    loadbalancer create#

    -

    Use the create subcommand to create a new load balancer.

    +

    Use the create subcommand to create a new load balancer.

    Usage#

    -

    -
    -
    l0 loadbalancer create [--port port --port port ...] [--certificate certificateName] [--private] [healthcheck-flags]environmentName loadBalancerName
    -
    -

    +
l0 loadbalancer create [--port port ... | --certificate certificateName | 
    +    --private | --healthcheck-target target | --healthcheck-interval interval | 
    +    --healthcheck-timeout timeout | --healthcheck-healthy-threshold healthyThreshold | 
    +    --healthcheck-unhealthy-threshold unhealthyThreshold] environmentName loadBalancerName
    +
    + +

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the existing Layer0 environment in which you want to create the load balancer.
    -
    -
    -
    loadBalancerName
    -
    A name for the load balancer.
    -
    -

    -

    Optional arguments#

    -

    -
    -
    - --port hostPort:containerPort/protocol -
    -
    -

    The port configuration for the load balancer. hostPort is the port on which the load balancer will listen for traffic; containerPort is the port that traffic will be forwarded to. You can specify multiple ports using --port xxx --port yyy. If this option is not specified, Layer0 will use the following configuration: 80:80/tcp

    -
    -
    -
    -
    - --certificate certificateName -
    -
    -

    The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.

    -
    -
    -
    -
    - --private -
    -
    -

    When you use this option, the load balancer will only be accessible from within the Layer0 environment.

    -
    -
    -
    -
    - --healthcheck-target target -
    -
    -

    The target of the check. Valid pattern is PROTOCOL:PORT/PATH (default: "TCP:80") -
    - If PROTOCOL is HTTP or HTTPS, both PORT and PATH are required -
    - - example: HTTP:80/admin/healthcheck -
    - If PROTOCOL is TCP or SSL, PORT is required and PATH is not supported -
    - - example: TCP:80

    -
    -
    -
    -
    - --healthcheck-interval interval -
    -
    -

    The interval between checks (default: 30).

    -
    -
    -
    -
    - --healthcheck-timeout timeout -
    -
    -

    The length of time before the check times out (default: 5).

    -
    -
    -
    -
    - --healthcheck-healthy-threshold healthyThreshold -
    -
    -

    The number of checks before the instance is declared healthy (default: 2).

    -
    -
    -
    -
    - --healthcheck-unhealthy-threshold unhealthyThreshold -
    -
    -

    The number of checks before the instance is declared unhealthy (default: 2).

    -
    -
    -

    -
    +
      +
    • environmentName - The name of the existing Layer0 environment in which you want to create the load balancer.
    • +
    • loadBalancerName - A name for the load balancer you are creating.
    • +
    +

    Optional arguments#

    +
      +
• --port port ... - The port configuration for the listener of the load balancer. Valid pattern is hostPort:containerPort/protocol. Multiple ports can be specified using --port port1 --port port2 ... (default: 80:80/TCP).
        +
      • hostPort - The port that the load balancer will listen for traffic on.
      • +
      • containerPort - The port that the load balancer will forward traffic to.
      • +
      • protocol - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).
      • +
      +
    • +
    • --certificate certificateName - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.
    • +
    • --private - When you use this option, the load balancer will only be accessible from within the Layer0 environment.
    • +
    • --healthcheck-target target - The target of the check. Valid pattern is PROTOCOL:PORT/PATH (default: "TCP:80").
        +
      • If PROTOCOL is HTTP or HTTPS, both PORT and PATH are required. Example: HTTP:80/admin/healthcheck.
      • +
      • If PROTOCOL is TCP or SSL, PORT is required and PATH is not used. Example: TCP:80
      • +
      +
    • +
    • --healthcheck-interval interval - The interval between checks (default: 30).
    • +
    • --healthcheck-timeout timeout - The length of time before the check times out (default: 5).
    • +
    • --healthcheck-healthy-threshold healthyThreshold - The number of checks before the instance is declared healthy (default: 2).
    • +
    • --healthcheck-unhealthy-threshold unhealthyThreshold - The number of checks before the instance is declared unhealthy (default: 2).
    • +
    +

    Ports and Health Checks

    -

    When both the --port and the --healthcheck-target options are omitted, Layer0 configures the load balancer with some default values: 80:80/tcp for ports and tcp:80 for healthcheck target. +

    When both the --port and the --healthcheck-target options are omitted, Layer0 configures the load balancer with some default values: 80:80/TCP for ports and TCP:80 for healthcheck target. These default values together create a load balancer configured with a simple but functioning health check, opening up a set of ports that allows traffic to the target of the healthcheck. -(--healthcheck-target tcp:80 tells the load balancer to ping its services at port 80 to determine their status, and --port 80:80/tcp configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services)

    +(--healthcheck-target TCP:80 tells the load balancer to ping its services at port 80 to determine their status, and --port 80:80/TCP configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services)

    When creating a load balancer with non-default configurations for either --port or --healthcheck-target, make sure that a valid --port and --healthcheck-target pairing is also created.
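For example, to create a public load balancer that listens on port 443 and forwards HTTPS traffic to port 80 of its services using an existing certificate (the environment, load balancer, and certificate names are illustrative):
    l0 loadbalancer create --port 443:80/HTTPS --certificate mycert demo mylb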

    loadbalancer delete#

    -

    Use the delete subcommand to delete an existing load balancer.

    +

    Use the delete subcommand to delete an existing load balancer.

    Usage#

    -

    -
    -
    l0 loadbalancer delete [--wait] loadBalancerName
    -
    -

    +
    l0 loadbalancer delete [--wait] loadBalancerName
    +
    + +

    Required parameters#

    -

    -
    -
    loadBalancerName
    -
    The name of the load balancer that you want to delete.
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --wait
    -
    Wait until the deletion is complete before exiting.
    -
    -

    +
      +
    • loadBalancerName - The name of the load balancer that you want to delete.
    • +
    +

    Optional arguments#

    +
      +
    • --wait - Wait until the deletion is complete before exiting.
    • +

    Additional information#

    -

    -
    -
    In order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer.
    -

    -
    -
    This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.
    -
    -

    +

    In order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer.

    +

This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.

    loadbalancer addport#

    -

    Use the addport subcommand to add a new port configuration to an existing Layer0 load balancer.

    +

    Use the addport subcommand to add a new port configuration to an existing Layer0 load balancer.

    Usage#

    -

    -
    -
    l0 loadbalancer addport loadBalancerName hostPort:containerPort/protocol [--certificate certificateName]
    -
    -

    +
    l0 loadbalancer addport [--certificate certificateName] loadBalancerName port
    +
    + +

    Required parameters#

    -

    -
    -
    loadBalancerName
    -
    The name of an existing Layer0 load balancer in which you want to add the port configuration.
    -
    -
    -
    hostPort
    -
    The port that the load balancer will listen on.
    -
    -
    -
    containerPort
    -
    The port that the load balancer will forward traffic to.
    -
    -
    -
    protocol
    -
    The protocol to use when forwarding traffic (acceptable values: tcp, ssl, http, and https).
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --certificate certificateName
    -
    The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.
    -
    -

    +
      +
    • loadBalancerName - The name of an existing Layer0 load balancer in which you want to add the port configuration.
    • +
    • port - The port configuration for the listener of the load balancer. Valid pattern is hostPort:containerPort/protocol.
        +
      • hostPort - The port that the load balancer will listen for traffic on.
      • +
      • containerPort - The port that the load balancer will forward traffic to.
      • +
      • protocol - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).
      • +
      +
    • +
    +

    Optional arguments#

    +
      +
    • --certificate certificateName - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.
    • +

    Additional information#

    -

    -
    -
    The port configuration you specify must not already be in use by the load balancer you specify.
    -
    -

    +

    The port configuration you specify must not already be in use by the load balancer you specify.
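For example, to add an HTTPS listener on port 8443 that forwards to container port 443 (the load balancer and certificate names are illustrative):
    l0 loadbalancer addport --certificate mycert mylb 8443:443/HTTPS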

    loadbalancer dropport#

    -

    Use the dropport subcommand to remove a port configuration from an existing Layer0 load balancer.

    +

    Use the dropport subcommand to remove a port configuration from an existing Layer0 load balancer.

    Usage#

    -

    -
    -
    l0 loadbalancer dropport loadBalancerName hostPort
    -
    -

    +
    l0 loadbalancer dropport loadBalancerName hostPort
    +
    + +

    Required parameters#

    -

    -
    -
    loadBalancerName
    -
    The name of an existing Layer0 load balancer in which you want to remove the port configuration.
    -
    -
    -
    hostPort
    -
    The host port to remove from the load balancer.
    -
    -

    +
      +
    • loadBalancerName- The name of an existing Layer0 load balancer from which you want to remove the port configuration.
    • +
    • hostPort- The host port to remove from the load balancer.
    • +

    loadbalancer get#

    -

    Use the get subcommand to display information about an existing Layer0 load balancer.

    +

    Use the get subcommand to display information about an existing Layer0 load balancer.

    Usage#

    -

    -
    -
    l0 loadbalancer get environmentName:loadBalancerName
    -
    -

    +
    l0 loadbalancer get [environmentName:]loadBalancerName
    +
    + +

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of an existing Layer0 environment.
    -
    -
    -
    loadBalancerName
    -
    The name of an existing Layer0 load balancer.
    -
    -

    +
      +
• [environmentName:]loadBalancerName - The name of an existing Layer0 load balancer. You can optionally provide the Layer0 environment (environmentName) associated with the load balancer.
    • +

    Additional information#

    -

    -
    -
    The get subcommand supports wildcard matching: l0 loadbalancer get entrypoint* would return all jobs beginning with entrypoint.
    -
    -

    +

    The get subcommand supports wildcard matching: l0 loadbalancer get entrypoint* would return all jobs beginning with entrypoint.

    loadbalancer list#

    -

    Use the list subcommand to display information about all of the existing load balancers in an instance of Layer0.

    +

    Use the list subcommand to display information about all of the existing load balancers in an instance of Layer0.

    Usage#

    -

    -
    -
    l0 loadbalancer list
    -
    -

    +
    l0 loadbalancer list
    +
    + +

    loadbalancer healthcheck#

    -

    Use the healthcheck subcommand to display information about or update the configuration of a load balancer's health check.

    +

    Use the healthcheck subcommand to display information about or update the configuration of a load balancer's health check.

    Usage#

    -

    -
    -
    l0 loadbalancer healthcheck [healthcheck-flags] loadbalancerName
    -
    -

    -

    Optional arguments#

    -

    -
    -
    - --set-target target -
    -
    -

    The target of the check. Valid pattern is PROTOCOL:PORT/PATH, where PROTOCOL values are: -
    - HTTP or HTTPS: both PORT and PATH are required -
    - - example: HTTP:80/admin/healthcheck -
    - TCP or SSL: PORT is required, PATH is not supported -
    - - example: TCP:80

    -
    -
    -
    -
    - --set-interval interval -
    -
    -

    The interval between checks.

    -
    -
    -
    -
    - --set-timeout timeout -
    -
    -

    The length of time before the check times out.

    -
    -
    -
    -
    - --set-healthy-threshold healthyThreshold -
    -
    -

    The number of checks before the instance is declared healthy.

    -
    -
    -
    -
    - --set-unhealthy-threshold unhealthyThreshold -
    -
    -

    The number of checks before the instance is declared unhealthy.

    -
    -
    -

    -

    Additional information#

    -
    -
    -
    Calling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.
    -
    -
    +
    l0 loadbalancer healthcheck [--set-target target | --set-interval interval | 
    +    --set-timeout timeout | --set-healthy-threshold healthyThreshold | 
    +    --set-unhealthy-threshold unhealthyThreshold] loadbalancerName
    +
    + +

    Required parameters#

    +
      +
    • loadBalancerName - The name of the existing Layer0 load balancer you are modifying.
    • +
    +

    Optional arguments#

    +
      +
    • --set-target target - The target of the check. Valid pattern is PROTOCOL:PORT/PATH.
        +
      • If PROTOCOL is HTTP or HTTPS, both PORT and PATH are required. Example: HTTP:80/admin/healthcheck.
      • +
      • If PROTOCOL is TCP or SSL, PORT is required and PATH is not used. Example: TCP:80
      • +
      +
    • +
    • --set-interval interval - The interval between health checks.
    • +
    • --set-timeout timeout - The length of time in seconds before the health check times out.
    • +
    • --set-healthy-threshold healthyThreshold - The number of checks before the instance is declared healthy.
    • +
    • --set-unhealthy-threshold unhealthyThreshold - The number of checks before the instance is declared unhealthy.
    • +
    +

    Additional information#

    +

    Calling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.
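For example, to point the health check at an HTTP endpoint and shorten the interval (the load balancer name, path, and interval are illustrative):
    l0 loadbalancer healthcheck --set-target HTTP:80/healthcheck --set-interval 15 mylb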


    Service#

    A service is a component of a Layer0 environment. The purpose of a service is to execute a Docker image specified in a deploy. In order to create a service, you must first create an environment and a deploy; in most cases, you should also create a load balancer before creating the service.

    -

    The service command is used with the following subcommands: create, delete, get, update, list, logs, and scale.

    +

    The service command is used with the following subcommands: create, delete, get, update, list, logs, and scale.

    service create#

    -

    Use the create subcommand to create a Layer0 service.

    +

    Use the create subcommand to create a Layer0 service.

    Usage#

    -

    -
    -
    l0 service create [--loadbalancer environmentName:loadBalancerName ] [--no-logs] environmentName serviceName deployName:deployVersion
    -
    -

    -

    Required parameters#

    -

    -
    -
    serviceName
    -
    A name for the service that you are creating.
    -
    -
    -
    environmentName
    -
    The name of an existing Layer0 environment.
    -
    -
    -
    deployName
    -
    The name of a Layer0 deploy that exists in the environment environmentName.
    -
    -
    -
    deployVersion
    -
    The version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.
    -
    -

    -

    Optional arguments#

    -

    -
    -
    --loadbalancer environmentName:loadBalancerName
    -
    Place the new service behind an existing load balancer named loadBalancerName in the environment named environmentName.
    -
    -
    -
    --no-logs
    -
    Disable cloudwatch logging for the service
    -
    -

    +
    l0 service create [--loadbalancer [environmentName:]loadBalancerName | 
    +    --no-logs] environmentName serviceName deployName[:deployVersion]
    +
    + + +

    Required parameters#

    +
      +
    • serviceName - A name for the service that you are creating.
    • +
    • environmentName - The name of an existing Layer0 environment.
    • +
    • deployName[:deployVersion] - The name of a Layer0 deploy that exists in the environment environmentName. You can optionally specify the version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.
    • +
    +

    Optional arguments#

    +
      +
    • --loadbalancer [environmentName:]loadBalancerName - Place the new service behind an existing load balancer loadBalancerName. You can optionally specify the Layer0 environment (environmentName) where the load balancer exists.
    • +
    • --no-logs - Disable cloudwatch logging for the service
    • +
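For example, to create a service behind an existing load balancer using version 3 of a deploy (all names below are illustrative):
    l0 service create --loadbalancer demo:mylb demo guestbook-svc guestbook:3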

    service update#

    -

    Use the update subcommand to apply an existing Layer0 Deploy to an existing Layer0 service.

    +

    Use the update subcommand to apply an existing Layer0 Deploy to an existing Layer0 service.

    Usage#

    -

    -
    -
    l0 service update [--no-logs] environmentName:serviceName deployName:deployVersion
    -
    -

    -

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the Layer0 environment in which the service resides.
    -
    -
    -
    serviceName
    -
    The name of an existing Layer0 service into which you want to apply the deploy.
    -
    -
    -
    deployName
    -
    The name of the Layer0 deploy that you want to apply to the service.
    -
    -
    -
    deployVersion
    -
    The version of the Layer0 deploy that you want to apply to the service. If you do not specify a version number, the latest version of the deploy will be applied.
    -
    -
    -
    --no-logs
    -
    Disable cloudwatch logging for the service
    -
    -

    -

    Additional information#

    -
    -
    -
    If your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead.
    -
    -
    +
    l0 service update [--no-logs] [environmentName:]serviceName deployName[:deployVersion]
    +
    + +

    Required parameters#

    +
      +
    • [environmentName:]serviceName - The name of an existing Layer0 service into which you want to apply the deploy. You can optionally specify the Layer0 environment (environmentName) of the service.
    • +
    • deployName[:deployVersion] - The name of the Layer0 deploy that you want to apply to the service. You can optionally specify a specific version of the deploy (deployVersion). If you do not specify a version number, the latest version of the deploy will be applied.
    • +
    +

    Optional arguments#

    +
      +
    • --no-logs - Disable cloudwatch logging for the service
    • +
    +

    Additional information#

    +

    If your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead.
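For example, to roll a service forward to version 4 of its deploy (the environment, service, and deploy names are illustrative):
    l0 service update demo:guestbook-svc guestbook:4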

    service delete#

    -

    Use the delete subcommand to delete an existing Layer0 service.

    +

    Use the delete subcommand to delete an existing Layer0 service.

    Usage#

    -

    -
    -
    l0 service delete [--wait] environmentName:serviceName
    -
    -

    -

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the Layer0 environment that contains the service you want to delete.
    -
    -
    -
    serviceName
    -
    The name of the Layer0 service that you want to delete.
    -
    -

    +
    l0 service delete [--wait] [environmentName:]serviceName
    +
    + + +

    Required parameters#

    +
      +
    • [environmentName:]serviceName - The name of the Layer0 service that you want to delete. You can optionally provide the Layer0 environment (environmentName) of the service.
    • +

    Optional arguments#

    -

    -
    -
    --wait
    -
    Wait until the deletion is complete before exiting.
    -
    -

    +
      +
    • --wait - Wait until the deletion is complete before exiting.
    • +

    Additional information#

    -

    -
    -
    This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.
    -
    -

    +

    This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.

    service get#

    -

    Use the get subcommand to display information about an existing Layer0 service.

    +

    Use the get subcommand to display information about an existing Layer0 service.

    Usage#

    -

    -
    -
    l0 service get environmentName:serviceName
    -
    -

    -

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of an existing Layer0 environment.
    -
    -
    -
    serviceName
    -
    The name of an existing Layer0 service.
    -
    -

    +
    l0 service get [environmentName:]serviceName
    +
    + + +

    Required parameters#

    +
      +
    • [environmentName:]serviceName - The name of an existing Layer0 service. You can optionally provide the Layer0 environment (environmentName) of the service.
    • +

    service list#

    -

    Use the list subcommand to list all of the existing services in your Layer0 instance.

    +

    Use the list subcommand to list all of the existing services in your Layer0 instance.

    Usage#

    -

    -
    -
    l0 service list
    -
    -

    +
l0 service list
    +
    + +

    service logs#

    -

    Use the logs subcommand to display the logs from a Layer0 service that is currently running.

    +

    Use the logs subcommand to display the logs from a Layer0 service that is currently running.

    Usage#

    -

    -
    -
    l0 service logs [--start MM/DD HH:MM] [--end MM/DD HH:MM] [--tail=N ] serviceName
    -
    -

    -

    Required parameters#

    -

    -
    -
    serviceName
    -
    The name of the Layer0 service for which you want to view logs.
    -
    -

    +
    l0 service logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] serviceName
    +
    + + +

    Required parameters#

    +
      +
    • serviceName - The name of the Layer0 service for which you want to view logs.
    • +

    Optional arguments#

    -

    -
    -
    --start MM/DD HH:MM
    -
    The start of the time range to fetch logs.
    -
    -
    -
    --end MM/DD HH:MM
    -
    The end of the time range to fetch logs.
    -
    -
    -
    --tail=N
    -
    Display only the last N lines of the log.
    -
    -

    +
      +
    • --start MM/DD HH:MM - The start of the time range to fetch logs.
    • +
    • --end MM/DD HH:MM - The end of the time range to fetch logs.
    • +
    • --tail=N - Display only the last N lines of the log.
    • +

    service scale#

    -

    Use the scale subcommand to specify how many copies of an existing Layer0 service should run.

    +

    Use the scale subcommand to specify how many copies of an existing Layer0 service should run.

    Usage#

    -

    -
    -
    l0 service scale environmentName:serviceName N
    -
    -

    -

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the Layer0 environment that contains the service that you want to scale.
    -
    -
    -
    serviceName
    -
    The name of the Layer0 service that you want to scale up.
    -
    -
    -
    N
    -
    The number of copies of the specified service that should be run.
    -
    -

    +
    l0 service scale [environmentName:]serviceName copies
    +
    + + +

    Required parameters#

    +
      +
    • [environmentName:]serviceName - The name of the Layer0 service that you want to scale up. You can optionally provide the Layer0 environment (environmentName) of the service.
    • +
    • copies - The number of copies of the specified service that should be run.
    • +
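For example, to run three copies of a service (the environment and service names are illustrative):
    l0 service scale demo:guestbook-svc 3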

    Task#

    A Layer0 task is a component of an environment. A task executes the contents of a Docker image, as specified in a deploy. A task differs from a service in that a task does not restart after exiting. Additionally, ports are not exposed when using a task.

    -

    The task command is used with the following subcommands: create, delete, get, list, and logs.

    +

    The task command is used with the following subcommands: create, delete, get, list, and logs.

    task create#

    -

    Use the create subcommand to create a Layer0 task.

    +

    Use the create subcommand to create a Layer0 task.

    Usage#

    -

    -
    -
    l0 task create [--no-logs] [--copies copies] environmentName taskName deployName
    -
    -

    -

    Required parameters#

    -

    -
    -
    environmentName
    -
    The name of the existing Layer0 environment in which you want to create the task.
    -
    -
    -
    taskName
    -
    A name for the task.
    -
    -
    -
    deployName
    -
    The name of an existing Layer0 deploy that the task should use.
    -
    -

    +
    l0 task create [--copies copies | --no-logs] environmentName taskName deployName
    +
    + + +

    Required parameters#

    +
      +
    • environmentName - The name of the existing Layer0 environment in which you want to create the task.
    • +
    • taskName - A name for the task.
    • +
    • deployName - The name of an existing Layer0 deploy that the task should use.
    • +

    Optional arguments#

    -

    -
    -
    --copies
    -
    The number of copies of the task to run (default: 1)
    -
    -
    -
    --no-logs
    -
    Disable cloudwatch logging for the service
    -
    -

    +
      +
    • --copies copies - The number of copies of the task to run (default: 1).
    • +
    • --no-logs - Disable cloudwatch logging for the service.
    • +
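For example, to run two copies of a one-off task from an existing deploy (the environment, task, and deploy names are illustrative):
    l0 task create --copies 2 demo migrate-task migrate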

    task delete#

    -

    Use the delete subcommand to delete an existing Layer0 task.

    +

    Use the delete subcommand to delete an existing Layer0 task.

    Usage#

    -

    -
    -
    l0 task delete [environmentName:]taskName
    -
    -

    -

    Required parameters#

    -

    -
    -
    taskName
    -
    The name of the Layer0 task that you want to delete.
    -
    -

    -

    Optional parameters#

    -

    -
    -
    [environmentName:]
    -
    The name of the Layer0 environment that contains the task. This parameter is only necessary if multiple environments contain tasks with exactly the same name.
    -
    -

    +
    l0 task delete [environmentName:]taskName
    +
    + + +

    Required parameters#

    +
      +
• [environmentName:]taskName - The name of the Layer0 task that you want to delete. You can optionally specify the name of the Layer0 environment that contains the task. This parameter is only required if multiple environments contain tasks with exactly the same name.
    • +

    Additional information#

    -

    -
    -
    Until the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.
    -
    -

    +

    Until the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.

    task get#

    Use the get subcommand to display information about an existing Layer0 task (taskName).

    Usage#

    l0 task get [environmentName:]taskName

    Required parameters#

    • [environmentName:]taskName - The name of a Layer0 task for which you want to see information. You can optionally specify the name of the Layer0 environment that contains the task.

    Additional information#

    The value of taskName does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in taskName, then information about all matching tasks will be returned.

    task list#

    Use the list subcommand to display a list of running tasks in your Layer0.

    Usage#

    l0 task list

    task logs#

    Use the logs subcommand to display logs for a running Layer0 task.

    Usage#

    l0 task logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] taskName

    Required parameters#

    • taskName - The name of an existing Layer0 task.

    Optional arguments#

    • --start MM/DD HH:MM - The start of the time range to fetch logs.
    • --end MM/DD HH:MM - The end of the time range to fetch logs.
    • --tail=N - Display only the last N lines of the log.
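
    For example, a hypothetical invocation (the task name is a placeholder) that shows only the last 100 log lines for a task:

      l0 task logs --tail=100 nightly-report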

    Additional information#

    The value of taskName does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in taskName, then information about all matching tasks will be returned.

    Consul reference#

    Consul is an open-source tool for discovering and configuring services in your network architecture. Specifically, Consul provides the following features:

    • Discovery of services

    Service Configuration#

      Layer0 Services that use Consul will need to add the Registrator and Consul Agent definitions to the containerDefinitions section of your Deploys. You must also add the Docker Socket definition to the volumes section of your Deploys.


      For an example of a Deploy that uses Consul, see the Guestbook with Consul guide.


      Registrator Container Definition#

    {
         "name": "registrator",
         "image": "gliderlabs/registrator:master",
         "essential": true,
         ...
    },

    Consul Agent Container Definition#

    Warning

    You must replace <url> with your Layer0 Consul Load Balancer's URL.

    Environment Variables#

    Docker Socket Volume Definition#

    "volumes": [
        {
            "name": "dockersocket",
            "host": {
                    "sourcePath": "/var/run/docker.sock"
            }
        }
    ],
    EC2 Container Registry#

    ECR is an Amazon implementation of a Docker registry. It acts as a private registry in your AWS account, which can be accessed from any Docker client, as well as from Layer0. Consider using ECR if you have stability issues with hosted Docker registries and do not wish to share your images publicly on Docker Hub.

    Setup#

    When interacting with ECR, you will first need to create a repository and set up login credentials on your development machine.

    Repository#

    Each repository needs to be created via an AWS API call.

      > aws ecr create-repository --repository-name myteam/myproject

    Login#

    To authenticate with the ECR service, Amazon provides the get-login command, which generates an authentication token and returns a docker command to set it up.

      > aws ecr get-login
      # this command will return the following: (password is typically hundreds of characters)
      docker login -u AWS -p password -e none https://aws_account_id.dkr.ecr.us-east-1.amazonaws.com

    Execute the provided docker command to store the login credentials

    After creating the repository and local login credentials, you may interact with images (and tags) under this path from a local Docker client.

      docker pull ${ecr-url}/myteam/myproject
      docker push ${ecr-url}/myteam/myproject:custom-tag-1

    Deploy Example#

    Here we'll walk through using ECR when deploying to Layer0, using a very basic wait container.

    Make docker image#

    Your docker image can be built locally or pulled from dockerhub. For this example, we made a service that waits and then exits (useful for triggering regular restarts).

    FROM busybox

    ENV SLEEP_TIME=60

    CMD sleep $SLEEP_TIME

    Then build the image with the tag xfra/wait:

     > docker build -f Dockerfile.wait -t xfra/wait .

    Upload to ECR#

    After preparing a login and registry, tag the image with the remote url, and use docker push

      docker tag xfra/wait 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait
      docker push 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait


    Create a deploy#

    To run this image in Layer0, we create a dockerrun file, describing the instance and any additional variables

    {
      "containerDefinitions": [
        {
          "name": "timeout",
          ...

      l0 deploy create timeout.dockerrun.aws.json timeout

    Deploy#

    Finally, run that deploy as a service or a task. (the service will restart every 12 hours)

      l0 service create demo timeoutsvc timeout:latest

    References#

    • create-repository
    • get-login


    Layer0 Setup Reference#


    The Layer0 Setup application (commonly called l0-setup) is used for administrative tasks on Layer0 instances.


    Global options#


    l0-setup can be used with one of several commands: init, plan, apply, list, push, pull, endpoint, destroy, upgrade, and set. These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using l0-setup.


    Usage#

    l0-setup [global options] command [command options] params

    Global options#

    • -l value, --log value - The log level to display on the console when you run commands. (default: info)
    • --version - Display the version number of the l0-setup application.

    Init#

    The init command is used to initialize or reconfigure a Layer0 instance. This command will prompt the user for inputs required to create/update a Layer0 instance. Each of the inputs can be specified through an optional flag.

    Usage#

    l0-setup init [--docker-path path | --module-source path | 
        --version version | --aws-region region | --aws-access-key accessKey | 
        --aws-secret-key secretKey] instanceName

    Optional arguments#

    • --docker-path - Path to docker config.json file. This is used to include private Docker Registry authentication for this Layer0 instance.
    • --module-source - The source input variable is the path to the Terraform Layer0. By default, this points to the Layer0 github repository. Using values other than the default may result in undesired consequences.
    • --version - The version input variable specifies the tag to use for the Layer0 Docker images: quintilesims/l0-api and quintilesims/l0-runner.
    • --aws-ssh-key-pair - The ssh_key_pair input variable specifies the name of the ssh key pair to include in EC2 instances provisioned by Layer0. This key pair must already exist in the AWS account. The names of existing key pairs can be found in the EC2 dashboard.
    • --aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
    • --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
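
    For example, a hypothetical invocation (the instance name, key values, and key pair name are placeholders) that supplies the AWS credentials and SSH key pair up front:

      l0-setup init \
          --aws-access-key ABC123XYZ \
          --aws-secret-key ZXY987cba \
          --aws-ssh-key-pair mySSHKey \
          mylayer0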

    Plan#

    The plan command is used to show the planned operation(s) to run during the next apply on a Layer0 instance, without actually executing any actions.

    Usage#

    l0-setup plan instanceName

    Options#

    There are no options for this command.


    Apply#

    The apply command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the Layer0 configuration to an S3 bucket unless the --push flag is set to false. Pushing the configuration to an S3 bucket requires AWS credentials which, if not set via the optional --aws-* flags, are read from environment variables or a credentials file.

    Usage#

    l0-setup apply [--quick | --push=false | --aws-access-key accessKey | 
        --aws-secret-key secretKey] instanceName

    Optional arguments#

    • --quick - Skips verification checks that normally run after terraform apply has completed
    • --push=false - Skips uploading local Layer0 configuration files to an S3 bucket
    • --aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
    • --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
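
    For example, a minimal invocation (the instance name is a placeholder); when the --aws-* flags are omitted, credentials are read from environment variables or a credentials file:

      l0-setup apply mylayer0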

    List#

    The list command is used to list local and remote Layer0 instances.

    Usage#

    l0-setup list [--local=false | --remote=false | --aws-access-key accessKey | 
        --aws-secret-key secretKey]

    Optional arguments#

    • -l, --local - Show local Layer0 instances. This value is true by default.
    • -r, --remote - Show remote Layer0 instances. This value is true by default.

    Push#

    The push command is used to back up your Layer0 configuration files to an S3 bucket.

    Usage#

    l0-setup push [--aws-access-key accessKey | 
        --aws-secret-key secretKey] instanceName

    Optional arguments#

    • --aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
    • --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
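
    For example (the instance name is a placeholder):

      l0-setup push mylayer0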

    Pull#

    The pull command is used to copy Layer0 configuration files from an S3 bucket.

    Usage#

    l0-setup pull [--aws-access-key accessKey | 
        --aws-secret-key secretKey] instanceName

    Optional arguments#

    • --aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.
    • --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.

    Endpoint#

    The endpoint command is used to show environment variables used to connect to a Layer0 instance.

    Usage#

    l0-setup endpoint [-i | -d | -s syntax] instanceName

    Optional arguments#

    • -i, --insecure - Show environment variables that allow for insecure settings
    • -d, --dev - Show environment variables that are required for local development

    Destroy#

    The destroy command is used to destroy all resources associated with a Layer0 instance.

    Caution

    Destroying a Layer0 instance cannot be undone. If you created backups of your Layer0 configuration using the push command, those backups will also be deleted when you run the destroy command.

    Usage#

    l0-setup destroy [--force] instanceName

    Optional arguments#

      • --force - Skips confirmation prompt

      Upgrade#

    The upgrade command is used to upgrade a Layer0 instance to a new version. You will need to run an apply after this command has completed.

    Usage#

    l0-setup upgrade [--force] instanceName version

    Optional arguments#

      • --force - Skips confirmation prompt
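
    For example, upgrading a hypothetical instance to a specific release and then applying the change:

      l0-setup upgrade mylayer0 v0.10.4
      l0-setup apply mylayer0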

      Set#

    The set command is used to set input variable(s) for a Layer0 instance's Terraform module. This command can be used to shorthand the init and upgrade commands, and can also be used with custom Layer0 modules. You will need to run an apply after this command has completed.

    Usage#

    l0-setup set [--input key=value] instanceName

    Example Usage

    l0-setup set --input username=admin --input password=pass123 mylayer0

    Options#

    • --input key=val - Specify an input using key=val format

    Task Definitions#

    This guide gives some overview into the composition of a task definition. For more comprehensive documentation, we recommend taking a look at the official AWS docs.

    Sample#

    The following snippet contains the task definition for the Guestbook application:

    {
        "AWSEBDockerrunVersion": 2,
        "containerDefinitions": [
            {
            ...


    Layer0 Terraform Provider Reference#

    Terraform is an open-source tool for provisioning and managing infrastructure. If you are new to Terraform, we recommend checking out their documentation.

    Layer0 has built a custom provider for Layer0. This provider allows users to create, manage, and update Layer0 entities using Terraform.

    Prerequisites#

    • Terraform v0.11+ (download), accessible in your system path.

    Install#

    Download a Layer0 v0.8.4+ release.


    Getting Started#

    • Checkout the Terraform section of the Guestbook walkthrough here.
    • We've added some tips and links to helpful resources in the Best Practices section below.

    Provider#

    Example Usage#

    # Add 'endpoint' and 'token' variables
    variable "endpoint" {}

    variable "token" {}

    # Configure the layer0 provider
    provider "layer0" {
      endpoint        = "${var.endpoint}"
      token           = "${var.token}"
      skip_ssl_verify = true
    }

    Argument Reference#

    The following arguments are supported:

    Note

    The endpoint and token variables for your layer0 api can be found using the l0-setup endpoint command

    • endpoint - (Required) The endpoint of the layer0 api

    API Data Source#

      The API data source is used to extract useful read-only variables from the Layer0 API.

      Example Usage#

    # Configure the api data source
    data "layer0_api" "config" {}

    # Output the layer0 vpc id
    output "vpc id" {
      val = "${data.layer0_api.config.vpc_id}"
    }

      Attribute Reference#

    Deploy Data Source#

      The Deploy data source is used to extract Layer0 Deploy attributes.

      Example Usage#

    # Configure the deploy data source
    data "layer0_deploy" "dpl" {
      name    = "my-deploy"
      version = "1"
    }

      Argument Reference#

    Environment Data Source#

      The Environment data source is used to extract Layer0 Environment attributes.

      Example Usage#

    # Configure the environment data source
    data "layer0_environment" "env" {
      name = "my-environment"
    }

    # Output the layer0 environment id
    output "environment_id" {
      val = "${data.layer0_environment.env.id}"
    }

      Argument Reference#

    Load Balancer Data Source#

      The Load Balancer data source is used to extract Layer0 Load Balancer attributes.

      Example Usage#

    # Configure the load balancer source
    data "layer0_load_balancer" "lb" {
      name           = "my-loadbalancer"
      environment_id = "${data.layer0_environment.env.environment_id}"
    }

    # Output the layer0 load balancer id
    output "load_balancer_id" {
      val = "${data.layer0_load_balancer.lb.id}"
    }

      Argument Reference#

    Service Data Source#

      The Service data source is used to extract Layer0 Service attributes.

      Example Usage#

    # Configure the service data source
    data "layer0_service" "svc" {
      name           = "my-service"
      environment_id = "${data.layer0_environment.env.environment_id}"
    }

    # Output the layer0 service id
    output "service_id" {
      val = "${data.layer0_service.svc.id}"
    }

      Argument Reference#

    Deploy Resource#

      Provides a Layer0 Deploy.

    Performing variable substitution inside of your deploy's json file (typically named Dockerrun.aws.json) can be done through Terraform's template_file. For a working example, please see the sample Guestbook application.

      Example Usage#

    # Configure the deploy template
    data "template_file" "guestbook" {
      template = "${file("Dockerrun.aws.json")}"
      vars {
        docker_image_tag = "latest"
      }
    }
    ...

      Argument Reference#

    Environment Resource#

      Provides a Layer0 Environment

      Example Usage#

    # Create a new environment
    resource "layer0_environment" "demo" {
      name      = "demo"
      size      = "m3.medium"
      ...

    Argument Reference#

    Load Balancer Resource#

      Provides a Layer0 Load Balancer

      Example Usage#

    # Create a new load balancer
    resource "layer0_load_balancer" "guestbook" {
      name        = "guestbook"
      environment = "demo123"
      ...

    Argument Reference#
    • health_check - (Optional, Default: {"TCP:80" 30 5 2 2}) A health_check block. Health check documented below

    Ports (port) support the following:


    Introduction to Terraform#

    What does Terraform do?#

    Terraform is a powerful orchestration tool for creating, updating, deleting, and otherwise managing infrastructure in an easy-to-understand, declarative manner. Terraform's documentation is very good, but at a glance:

    Advantages Versus Layer0 CLI?#

    ...provider, but also gain the ability to orchestrate resources and tools beyond the CLI's scope.

    How do I get Terraform?#

    Check out Terraform's documentation on the subject.


    Updating a Layer0 service#

    There are three methods of updating an existing Layer0 service. The first method is to update the existing Deploy to refer to a new Docker task definition. The second method is to create a new Service that uses the same Loadbalancer. The third method is to create both a new Loadbalancer and a new Service.

    There are advantages and disadvantages to each of these methods. The following sections discuss the advantages and disadvantages of using each method, and include procedures for implementing each method.

    Method 1: Refer to a new task definition#

    This method of updating a Layer0 application is the easiest to implement, because you do not need to rescale the Service or modify the Loadbalancer. This method is completely transparent to all other components of the application, and using this method does not involve any downtime.

    The disadvantage of using this method is that you cannot perform A/B testing of the old and new services, and you cannot control which traffic goes to the old service and which goes to the new one.

    To update an existing Deploy to refer to a new task definition:

    At the command line, type the following to create a new Deploy:

      l0 deploy create taskDefPath deployName

    taskDefPath is the path to the ECS Task Definition. Note that if deployName already exists, this step will create a new version of that Deploy.

    Use l0 service update to update the existing service:

      l0 service update serviceName deployName[:deployVersion]

    By default, the service name you specify in this command will refer to the latest version of deployName. You can optionally specify a specific version of the deploy, as shown above.
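
    For example, with a hypothetical deploy named guestbook (built from guestbook.dockerrun.aws.json) and an existing service named guestbook-svc:

      l0 deploy create guestbook.dockerrun.aws.json guestbook
      l0 service update guestbook-svc guestbook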

    Method 2: Create a new Deploy and Service using the same Loadbalancer#

    This method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. This method also allows you to re-scale the service if necessary, using the l0 service scale command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates.

    The disadvantage of using this method is that you cannot control the routing of traffic between the old and new versions of the application.

    To create a new Deploy and Service:

    At the command line, type the following to create a new deploy or a new version of a deploy:

      l0 deploy create taskDefPath deployName

    taskDefPath is the path to the ECS Task Definition. Note that if deployName already exists, this step will create a new version of that Deploy.

    Use l0 service create to create a new service that uses deployName behind an existing load balancer named loadBalancerName:

      l0 service create --loadbalancer [environmentName:]loadBalancerName environmentName serviceName deployName[:deployVersion]

    By default, the service name you specify in this command will refer to the latest version of deployName. You can optionally specify a specific version of the deploy, as shown above. You can also optionally specify the name of the environment (environmentName) where the load balancer exists.

    Check to make sure that the new service is working as expected. If it is, and you do not want to keep the old service, delete the old service:

      l0 service delete service
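
    For example, a hypothetical sequence (all names are placeholders) that creates a second service behind the same load balancer and then removes the old one:

      l0 deploy create guestbook.dockerrun.aws.json guestbook
      l0 service create --loadbalancer demo:guestbook-lb demo guestbook-svc2 guestbook
      l0 service delete guestbook-svc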

    Method 3: Create a new Deploy, Loadbalancer and Service#

    The final method of updating a Layer0 service is to create an entirely new Deploy, Load Balancer and Service. This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services.

    The disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Load Balancer.

    To create a new Deploy, Load Balancer and Service:

    Type the following command to create a new Deploy:

      l0 deploy create taskDefPath deployName

    taskDefPath is the path to the ECS Task Definition. Note that if deployName already exists, this step will create a new version of that Deploy.

    Use l0 loadbalancer create to create a new Load Balancer:

      l0 loadbalancer create --port port environmentName loadBalancerName deployName

    • port is the port configuration for the listener of the Load Balancer. Valid pattern is hostPort:containerPort/protocol. Multiple ports can be specified using --port port1 --port port2 ...; see the example after this list.
      • hostPort - The port that the load balancer will listen for traffic on.
      • containerPort - The port that the load balancer will forward traffic to.
      • protocol - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).
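
    For example, a hypothetical load balancer that listens on port 80 and forwards traffic to container port 80 over HTTP (all names are placeholders):

      l0 loadbalancer create --port 80:80/HTTP demo guestbook-lb2 guestbook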

    Note

    The value of loadbalancerName in the above command must be unique to the Environment.

    Use l0 service create to create a new Service using the Load Balancer you just created:

      l0 service create --loadbalancer loadBalancerName environmentName serviceName deployName

    Note

    The value of serviceName in the above command must be unique to the Environment.

    Implement a method of routing traffic between the old and new Services, such as HAProxy or Consul.

    + + + + + + + \ No newline at end of file diff --git a/docs/search/search_index.json b/docs/search/search_index.json new file mode 100644 index 000000000..87139deb3 --- /dev/null +++ b/docs/search/search_index.json @@ -0,0 +1,1724 @@ +{ + "docs": [ + { + "location": "/", + "text": "Build, Manage, and Deploy Your Application\n#\n\n\n\n\nMeet Layer0\n#\n\n\nLayer0 is a framework that helps you deploy web applications to the cloud with minimal fuss. Using a simple command line interface (CLI), you can manage the entire life cycle of your application without having to focus on infrastructure.\n\n\nReady to learn more about Layer0? See our \nintroduction page\n to learn about some important concepts. When you're ready to get started, take a look at the \ninstallation page\n for information about setting up Layer0.\n\n\nDownload\n#\n\n\n\n\n\n\n\n\nDownload \nv0.10.4\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\n\n\nContact Us\n#\n\n\nIf you have questions about Layer0, email the development team at \ncarbon@us.imshealth.com\n.", + "title": "Home" + }, + { + "location": "/#build-manage-and-deploy-your-application", + "text": "", + "title": "Build, Manage, and Deploy Your Application" + }, + { + "location": "/#meet-layer0", + "text": "Layer0 is a framework that helps you deploy web applications to the cloud with minimal fuss. Using a simple command line interface (CLI), you can manage the entire life cycle of your application without having to focus on infrastructure. Ready to learn more about Layer0? See our introduction page to learn about some important concepts. When you're ready to get started, take a look at the installation page for information about setting up Layer0.", + "title": "Meet Layer0" + }, + { + "location": "/#download", + "text": "Download v0.10.4 macOS Linux Windows", + "title": "Download" + }, + { + "location": "/#contact-us", + "text": "If you have questions about Layer0, email the development team at carbon@us.imshealth.com .", + "title": "Contact Us" + }, + { + "location": "/releases/", + "text": "Version\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\n\n\n\n\nv0.10.4\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.3\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.2\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.1\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.10.0\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.9.0\n\n\nmacOS\n\n\nLinux\n\n\nWindows\n\n\n\n\n\n\nv0.8.4\n\n\nmacOS\n\n\nLinux\n\n\nWindows", + "title": "Releases" + }, + { + "location": "/intro/", + "text": "Layer0 Introduction\n#\n\n\nIn recent years, the process of deploying applications has seen incredible innovation. However, this innovation has taken a somewhat simple task and made it into something quite \ncomplicated\n. Cloud providers, load balancing, virtual servers, IP subnets, and a continuing list of technological considerations are not only required to be understood, but their creation and management must be automated for a modern application to be successful at scale.\n\n\nThe burden of understanding a complicated and ever-growing infrastructure is a large aspect of what Layer0 is trying to fix. We've already done the leg work for huge swathes of your backend infrastructure, and we've made it easy to tear down and start over again, too. 
Meanwhile, you can develop locally using \nDocker\n and be assured that your application will properly translate to the cloud when you're ready to deploy.\n\n\nLayer0 requires a solid understanding of Docker to get the most out of it. We highly recommend starting with \nDocker's Understanding the Architecture\n to learn more about using Docker locally and in the cloud. We also recommend the \nTwelve-Factor App\n primer, which is a critical resource for understanding how to build a microservice.\n\n\n\n\nLayer0 Concepts\n#\n\n\nThe following concepts are core Layer0 abstractions for the technologies and features we use \nbehind the scenes\n. These terms will be used throughout our guides, so having a general understanding of them is helpful.\n\n\nCertificates\n#\n\n\nSSL certificates obtained from a valid \nCertificate Authority (CA)\n. You can use these certificates to secure your HTTPS services by applying them to your Layer0 load balancers.\n\n\nDeploys\n#\n\n\nECS Task Definitions\n. These configuration files detail how to deploy your application. We have several \nsample applications\n available that show what these files look like --- they're called \nDockerrun.aws.json\n within each sample app.\n\n\nTasks\n#\n\n\nManual one-off commands that don't necessarily make sense to keep running, or to restart when they finish. These run using Amazon's \nRunTask\n action (more info \nhere\n), and are \"ideally suited for processes such as batch jobs that perform work and then stop.\"\n\n\nLoad Balancers\n#\n\n\nPowerful tools that give you the basic building blocks for high-availability, scaling, and HTTPS. We currently use Amazon's \nElastic Load Balancing\n, and it pays to understand the basics of this service when working with Layer0.\n\n\nServices\n#\n\n\nYour running Layer0 applications. We also use the term \nservice\n for tools such as Consul, for which we provide a pre-built \nsample implementation\n using Layer0.\n\n\nEnvironments\n#\n\n\nLogical groupings of services. Typically, you would make a single environment for each tier of your application, such as \ndev\n, \nstaging\n, and \nprod\n.", + "title": "Introduction" + }, + { + "location": "/intro/#layer0-introduction", + "text": "In recent years, the process of deploying applications has seen incredible innovation. However, this innovation has taken a somewhat simple task and made it into something quite complicated . Cloud providers, load balancing, virtual servers, IP subnets, and a continuing list of technological considerations are not only required to be understood, but their creation and management must be automated for a modern application to be successful at scale. The burden of understanding a complicated and ever-growing infrastructure is a large aspect of what Layer0 is trying to fix. We've already done the leg work for huge swathes of your backend infrastructure, and we've made it easy to tear down and start over again, too. Meanwhile, you can develop locally using Docker and be assured that your application will properly translate to the cloud when you're ready to deploy. Layer0 requires a solid understanding of Docker to get the most out of it. We highly recommend starting with Docker's Understanding the Architecture to learn more about using Docker locally and in the cloud. 
We also recommend the Twelve-Factor App primer, which is a critical resource for understanding how to build a microservice.", + "title": "Layer0 Introduction" + }, + { + "location": "/intro/#layer0-concepts", + "text": "The following concepts are core Layer0 abstractions for the technologies and features we use behind the scenes . These terms will be used throughout our guides, so having a general understanding of them is helpful.", + "title": "Layer0 Concepts" + }, + { + "location": "/intro/#certificates", + "text": "SSL certificates obtained from a valid Certificate Authority (CA) . You can use these certificates to secure your HTTPS services by applying them to your Layer0 load balancers.", + "title": "Certificates" + }, + { + "location": "/intro/#deploys", + "text": "ECS Task Definitions . These configuration files detail how to deploy your application. We have several sample applications available that show what these files look like --- they're called Dockerrun.aws.json within each sample app.", + "title": "Deploys" + }, + { + "location": "/intro/#tasks", + "text": "Manual one-off commands that don't necessarily make sense to keep running, or to restart when they finish. These run using Amazon's RunTask action (more info here ), and are \"ideally suited for processes such as batch jobs that perform work and then stop.\"", + "title": "Tasks" + }, + { + "location": "/intro/#load-balancers", + "text": "Powerful tools that give you the basic building blocks for high-availability, scaling, and HTTPS. We currently use Amazon's Elastic Load Balancing , and it pays to understand the basics of this service when working with Layer0.", + "title": "Load Balancers" + }, + { + "location": "/intro/#services", + "text": "Your running Layer0 applications. We also use the term service for tools such as Consul, for which we provide a pre-built sample implementation using Layer0.", + "title": "Services" + }, + { + "location": "/intro/#environments", + "text": "Logical groupings of services. Typically, you would make a single environment for each tier of your application, such as dev , staging , and prod .", + "title": "Environments" + }, + { + "location": "/setup/install/", + "text": "Create a new Layer0 Instance\n#\n\n\nPrerequisites\n#\n\n\nBefore you can install and configure Layer0, you must obtain the following:\n\n\n\n\n\n\nAccess to an AWS account\n\n\n\n\n\n\nAn EC2 Key Pair\n\nThis key pair allows you to access the EC2 instances running your Services using SSH.\nIf you have already created a key pair, you can use it for this process.\nOtherwise, \nfollow the AWS documentation\n to create a new key pair.\nMake a note of the name that you selected when creating the key pair.\n\n\n\n\n\n\nTerraform v0.11+\n\nWe use Terraform to create the resources that Layer0 needs.\nIf you're unfamiliar with Terraform, you may want to check out our \nintroduction\n.\nIf you're ready to install Terraform, there are instructions in the \nTerraform documentation\n.\n\n\n\n\n\n\nPart 1: Download and extract Layer0\n#\n\n\n\n\nIn the \nDownloads section of the home page\n, select the appropriate installation file for your operating system. Extract the zip file to a directory on your computer.\n\n\n(Optional) Place the \nl0\n and \nl0-setup\n binaries into your system path. 
\nFor more information about adding directories to your system path, see the following resources:\n\n\n(Windows): \nHow to Edit Your System PATH for Easy Command Line Access in Windows\n\n\n(Linux/macOS): \nAdding a Directory to the Path\n\n\n\n\n\n\n\n\nPart 2: Create an Access Key\n#\n\n\nThis step will create an Identity \n Access Management (IAM) access key for your AWS account. \nYou will use the credentials created in this section when creating, updating, or removing Layer0 instances.\n\n\nTo create an Access Key:\n\n\n\n\n\n\nIn a web browser, login to the \nAWS Console\n.\n\n\n\n\n\n\nClick the \nServices\n dropdown menu in the upper left portion of the console page, then type \nIAM\n in the text box that appears at the top of the page after you click \nServices\n. As you type IAM, a search result will appear below the text box. Click on the IAM service result that appears below the text box.\n\n\n\n\n\n\nIn the left panel, click \nGroups\n, and then confirm that you have a group called \nAdministrators\n.\n\n\n\n\n\n\n\n\nIs the Administrators group missing in your AWS account?\n\n\nIf the \nAdministrators\n group does not already exist, complete the following steps:\n\n\n\n\n\n\nClick \nCreate New Group\n. Name the new group \nAdministrators\n, and then click \nNext Step\n.\n\n\n\n\n\n\nCheck the \nAdministratorAccess\n policy to attach the Administrator policy to your new group.\n\n\n\n\n\n\nClick \nNext Step\n, and then click \nCreate Group\n.\n\n\n\n\n\n\n\n\n\n\n\n\nIn the left panel, click \nUsers\n.\n\n\n\n\n\n\nClick the \nNew User\n button and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to \nProgrammatic access\n, and then click the \nNext: Permissions\n button.\n\n\n\n\n\n\nMake sure the \nAdd user to group\n button is highlighted. Find and check the box next to the group \nAdministrators\n. Click \nNext: Review\n button to continue. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe!\n\n\n\n\n\n\nReview your choices and then click the \nCreate user\n button.\n\n\n\n\n\n\nOnce your user account has been created, click the \nDownload .csv\n button to save your access and secret key to a CSV file.\n\n\n\n\n\n\nPart 3: Create a new Layer0 Instance\n#\n\n\nNow that you have downloaded Layer0 and configured your AWS account, you can create your Layer0 instance.\nFrom a command prompt, run the following (replacing \ninstance_name\n with a name for your Layer0 instance):\n\n\nl0-setup init \ninstance_name\n\n\n\n\n\n\nThis command will prompt you for many different inputs. \nEnter the required values for \nAWS Access Key\n, \nAWS Secret Key\n, and \nAWS SSH Key\n as they come up.\nAll remaining inputs are optional and can be set to their default by pressing enter.\n\n\n...\nAWS Access Key: The access_key input variable is used to provision the AWS resources\nrequired for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key.\nIt is recommended this key has the \nAdministratorAccess\n policy. Note that Layer0 will\nonly use this key for \nl0-setup\n commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: \nnone\n]\nPlease enter a value and press \nenter\n.\n Input: ABC123xzy\n\nAWS Secret Key: The secret_key input variable is used to provision the AWS resources\nrequired for Layer0. 
This corresponds to the Secret Access Key portion of an AWS Access Key.\nIt is recommended this key has the \nAdministratorAccess\n policy. Note that Layer0 will\nonly use this key for \nl0-setup\n commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: \nnone\n]\nPlease enter a value and press \nenter\n.\n Input: ZXY987cba\n\nAWS SSH Key Pair: The ssh_key_pair input variable specifies the name of the\nssh key pair to include in EC2 instances provisioned by Layer0. This key pair must\nalready exist in the AWS account. The names of existing key pairs can be found\nin the EC2 dashboard. Note that changing this value will not effect instances\nthat have already been provisioned.\n\n[current: \nnone\n]\nPlease enter a value and press \nenter\n.\n Input: mySSHKey\n...\n\n\n\n\n\nOnce the \ninit\n command has successfully completed, you're ready to actually create the resources needed to use Layer0.\nRun the following command (again, replace \ninstance_name\n with the name you've chosen for your Layer0 instance):\n\n\nl0-setup apply \ninstance_name\n\n\n\n\n\n\nThe first time you run the \napply\n command, it may take around 5 minutes to complete. \nThis command is idempotent; it is safe to run multiple times if it fails the first.\n\n\nAt the end of the \napply\n command, your Layer0 instance's configuration and state will be automatically backed up to an S3 bucket. You can manually back up your configuration at any time using the \npush\n command. It's a good idea to run this command regularly (\nl0-setup push \ninstance_name\n) to ensure that your configuration is backed up.\nThese files can be downloaded at any time using the \npull\n command (\nl0-setup pull \ninstance_name\n).\n\n\n\n\nUsing a Private Docker Registry\n\n\nThe procedures in this section are optional, but are highly recommended for production use.\n\n\n\n\nIf you require authentication to a private Docker registry, you will need a Docker configuration file present on your machine with access to private repositories (typically located at \n~/.docker/config.json\n). \n\n\nIf you don't have a config file yet, you can generate one by running \ndocker login [registry-address]\n. \nA configuration file will be generated at \n~/.docker/config.json\n.\n\n\nTo add this authentication to your Layer0 instance, run:\n\n\nl0-setup init --docker-path=\npath/to/config.json\n \ninstance_name\n\n\n\n\n\n\nThis will reconfigure your Layer0 configuration and add a rendered file into your Layer0 instance's directory at \n~/.layer0/\ninstance_name\n/dockercfg.json\n.\n\n\nYou can modify a Layer0 instance's \ndockercfg.json\n file and re-run the \napply\n command (\nl0-setup apply \ninstance_name\n) to make changes to your authentication. \n\nNote:\n Any EC2 instances created prior to changing your \ndockercfg.json\n file will need to be manually terminated since they only grab the authentication file during instance creation. \nTerminated EC2 instances will be automatically re-created by autoscaling.\n\n\n\n\nUsing an Existing VPC\n\n\nThe procedures in this section must be followed precisely to properly install Layer0 into an existing VPC\n\n\n\n\nBy default, \nl0-setup\n creates a new VPC to place resources. 
\nHowever, \nl0-setup\n can place resources in an existing VPC if the VPC meets all of the following conditions:\n\n\n\n\nHas access to the public internet (through a NAT instance or gateway)\n\n\nHas at least 1 public and 1 private subnet\n\n\nThe public and private subnets have the tag \nTier: Public\n or \nTier: Private\n, respectively.\nFor information on how to tag AWS resources, please visit the \nAWS documentation\n. \n\n\n\n\nOnce you are sure the existing VPC satisfies these requirements, run the \ninit\n command, \nplacing the VPC ID when prompted:\n\n\nl0-setup init \ninstance_name\n\n...\nVPC ID (optional): The vpc_id input variable specifies an existing AWS VPC to provision\nthe AWS resources required for Layer0. If no input is specified, a new VPC will be\ncreated for you. Existing VPCs must satisfy the following constraints:\n\n - Have access to the public internet (through a NAT instance or gateway)\n - Have at least 1 public and 1 private subnet\n - Each subnet must be tagged with [\nTier\n: \nPrivate\n] or [\nTier\n: \nPublic\n]\n\nNote that changing this value will destroy and recreate any existing resources.\n\n[current: ]\nPlease enter a new value, or press \nenter\n to keep the current value.\n Input: vpc123\n\n\n\n\n\nOnce the command has completed, it is safe to run \napply\n to provision the resources. \n\n\nPart 4: Connect to a Layer0 Instance\n#\n\n\nOnce the \napply\n command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the \nendpoint\n command.\n\n\nl0-setup endpoint --insecure \ninstance_name\n\nexport LAYER0_API_ENDPOINT=\nhttps://l0-instance_name-api-123456.us-west-2.elb.amazonaws.com\n\nexport LAYER0_AUTH_TOKEN=\nabcDEFG123\n\nexport LAYER0_SKIP_SSL_VERIFY=\n1\n\nexport LAYER0_SKIP_VERSION_VERIFY=\n1\n\n\n\n\n\n\n\n\nDanger\n\n\nThe \n--insecure\n flag shows configurations that bypass SSL and version verifications. \nThis is required as the Layer0 API created uses a self-signed SSL certificate by default.\nThese settings are \nnot\n recommended for production use!\n\n\n\n\nThe \nendpoint\n command supports a \n--syntax\n option, which can be used to turn configuration into a single line:\n\n\n\n\nBash (default) - \neval \"$(l0-setup endpoint --insecure \ninstance_name\n)\"\n\n\nPowershell - \nl0-setup endpoint --insecure --syntax=powershell \ninstance_name\n | Out-String | Invoke-Expression", + "title": "Install" + }, + { + "location": "/setup/install/#create-a-new-layer0-instance", + "text": "", + "title": "Create a new Layer0 Instance" + }, + { + "location": "/setup/install/#prerequisites", + "text": "Before you can install and configure Layer0, you must obtain the following: Access to an AWS account An EC2 Key Pair \nThis key pair allows you to access the EC2 instances running your Services using SSH.\nIf you have already created a key pair, you can use it for this process.\nOtherwise, follow the AWS documentation to create a new key pair.\nMake a note of the name that you selected when creating the key pair. Terraform v0.11+ \nWe use Terraform to create the resources that Layer0 needs.\nIf you're unfamiliar with Terraform, you may want to check out our introduction .\nIf you're ready to install Terraform, there are instructions in the Terraform documentation .", + "title": "Prerequisites" + }, + { + "location": "/setup/install/#part-1-download-and-extract-layer0", + "text": "In the Downloads section of the home page , select the appropriate installation file for your operating system. 
Extract the zip file to a directory on your computer. (Optional) Place the l0 and l0-setup binaries into your system path. \nFor more information about adding directories to your system path, see the following resources: (Windows): How to Edit Your System PATH for Easy Command Line Access in Windows (Linux/macOS): Adding a Directory to the Path", + "title": "Part 1: Download and extract Layer0" + }, + { + "location": "/setup/install/#part-2-create-an-access-key", + "text": "This step will create an Identity Access Management (IAM) access key for your AWS account. \nYou will use the credentials created in this section when creating, updating, or removing Layer0 instances. To create an Access Key: In a web browser, login to the AWS Console . Click the Services dropdown menu in the upper left portion of the console page, then type IAM in the text box that appears at the top of the page after you click Services . As you type IAM, a search result will appear below the text box. Click on the IAM service result that appears below the text box. In the left panel, click Groups , and then confirm that you have a group called Administrators . Is the Administrators group missing in your AWS account? If the Administrators group does not already exist, complete the following steps: Click Create New Group . Name the new group Administrators , and then click Next Step . Check the AdministratorAccess policy to attach the Administrator policy to your new group. Click Next Step , and then click Create Group . In the left panel, click Users . Click the New User button and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to Programmatic access , and then click the Next: Permissions button. Make sure the Add user to group button is highlighted. Find and check the box next to the group Administrators . Click Next: Review button to continue. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe! Review your choices and then click the Create user button. Once your user account has been created, click the Download .csv button to save your access and secret key to a CSV file.", + "title": "Part 2: Create an Access Key" + }, + { + "location": "/setup/install/#part-3-create-a-new-layer0-instance", + "text": "Now that you have downloaded Layer0 and configured your AWS account, you can create your Layer0 instance.\nFrom a command prompt, run the following (replacing instance_name with a name for your Layer0 instance): l0-setup init instance_name This command will prompt you for many different inputs. \nEnter the required values for AWS Access Key , AWS Secret Key , and AWS SSH Key as they come up.\nAll remaining inputs are optional and can be set to their default by pressing enter. ...\nAWS Access Key: The access_key input variable is used to provision the AWS resources\nrequired for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key.\nIt is recommended this key has the AdministratorAccess policy. Note that Layer0 will\nonly use this key for l0-setup commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: none ]\nPlease enter a value and press enter .\n Input: ABC123xzy\n\nAWS Secret Key: The secret_key input variable is used to provision the AWS resources\nrequired for Layer0. 
This corresponds to the Secret Access Key portion of an AWS Access Key.\nIt is recommended this key has the AdministratorAccess policy. Note that Layer0 will\nonly use this key for l0-setup commands associated with this Layer0 instance; the\nLayer0 API will use its own key with limited permissions to provision AWS resources.\n\n[current: none ]\nPlease enter a value and press enter .\n Input: ZXY987cba\n\nAWS SSH Key Pair: The ssh_key_pair input variable specifies the name of the\nssh key pair to include in EC2 instances provisioned by Layer0. This key pair must\nalready exist in the AWS account. The names of existing key pairs can be found\nin the EC2 dashboard. Note that changing this value will not effect instances\nthat have already been provisioned.\n\n[current: none ]\nPlease enter a value and press enter .\n Input: mySSHKey\n... Once the init command has successfully completed, you're ready to actually create the resources needed to use Layer0.\nRun the following command (again, replace instance_name with the name you've chosen for your Layer0 instance): l0-setup apply instance_name The first time you run the apply command, it may take around 5 minutes to complete. \nThis command is idempotent; it is safe to run multiple times if it fails the first. At the end of the apply command, your Layer0 instance's configuration and state will be automatically backed up to an S3 bucket. You can manually back up your configuration at any time using the push command. It's a good idea to run this command regularly ( l0-setup push instance_name ) to ensure that your configuration is backed up.\nThese files can be downloaded at any time using the pull command ( l0-setup pull instance_name ). Using a Private Docker Registry The procedures in this section are optional, but are highly recommended for production use. If you require authentication to a private Docker registry, you will need a Docker configuration file present on your machine with access to private repositories (typically located at ~/.docker/config.json ). If you don't have a config file yet, you can generate one by running docker login [registry-address] . \nA configuration file will be generated at ~/.docker/config.json . To add this authentication to your Layer0 instance, run: l0-setup init --docker-path= path/to/config.json instance_name This will reconfigure your Layer0 configuration and add a rendered file into your Layer0 instance's directory at ~/.layer0/ instance_name /dockercfg.json . You can modify a Layer0 instance's dockercfg.json file and re-run the apply command ( l0-setup apply instance_name ) to make changes to your authentication. Note: Any EC2 instances created prior to changing your dockercfg.json file will need to be manually terminated since they only grab the authentication file during instance creation. \nTerminated EC2 instances will be automatically re-created by autoscaling. Using an Existing VPC The procedures in this section must be followed precisely to properly install Layer0 into an existing VPC By default, l0-setup creates a new VPC to place resources. \nHowever, l0-setup can place resources in an existing VPC if the VPC meets all of the following conditions: Has access to the public internet (through a NAT instance or gateway) Has at least 1 public and 1 private subnet The public and private subnets have the tag Tier: Public or Tier: Private , respectively.\nFor information on how to tag AWS resources, please visit the AWS documentation . 
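For example, if you manage tags with the AWS CLI, the existing subnets can be tagged as follows (a minimal sketch; the subnet IDs are placeholders for your own public and private subnet IDs, and the AWS CLI must already be configured with credentials for the account that owns the VPC):
# tag the subnet that routes through an internet gateway as public
aws ec2 create-tags --resources subnet-0aaa1111 --tags Key=Tier,Value=Public
# tag the subnet that routes through a NAT instance or gateway as private
aws ec2 create-tags --resources subnet-0bbb2222 --tags Key=Tier,Value=Private
The same tags can also be applied from the Tags tab of the VPC console.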
Once you are sure the existing VPC satisfies these requirements, run the init command, \nplacing the VPC ID when prompted: l0-setup init instance_name \n...\nVPC ID (optional): The vpc_id input variable specifies an existing AWS VPC to provision\nthe AWS resources required for Layer0. If no input is specified, a new VPC will be\ncreated for you. Existing VPCs must satisfy the following constraints:\n\n - Have access to the public internet (through a NAT instance or gateway)\n - Have at least 1 public and 1 private subnet\n - Each subnet must be tagged with [ Tier : Private ] or [ Tier : Public ]\n\nNote that changing this value will destroy and recreate any existing resources.\n\n[current: ]\nPlease enter a new value, or press enter to keep the current value.\n Input: vpc123 Once the command has completed, it is safe to run apply to provision the resources.", + "title": "Part 3: Create a new Layer0 Instance" + }, + { + "location": "/setup/install/#part-4-connect-to-a-layer0-instance", + "text": "Once the apply command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the endpoint command. l0-setup endpoint --insecure instance_name \nexport LAYER0_API_ENDPOINT= https://l0-instance_name-api-123456.us-west-2.elb.amazonaws.com \nexport LAYER0_AUTH_TOKEN= abcDEFG123 \nexport LAYER0_SKIP_SSL_VERIFY= 1 \nexport LAYER0_SKIP_VERSION_VERIFY= 1 Danger The --insecure flag shows configurations that bypass SSL and version verifications. \nThis is required as the Layer0 API created uses a self-signed SSL certificate by default.\nThese settings are not recommended for production use! The endpoint command supports a --syntax option, which can be used to turn configuration into a single line: Bash (default) - eval \"$(l0-setup endpoint --insecure instance_name )\" Powershell - l0-setup endpoint --insecure --syntax=powershell instance_name | Out-String | Invoke-Expression", + "title": "Part 4: Connect to a Layer0 Instance" + }, + { + "location": "/setup/upgrade/", + "text": "Upgrade a Layer0 Instance\n#\n\n\nThis section provides procedures for upgrading your Layer0 installation to the latest version.\nThis assumes you are using Layer0 version \nv0.10.0\n or later. \n\n\n\n\nWarning\n\n\nLayer0 does not support updating MAJOR or MINOR versions in place unless explicitly stated otherwise.\nUsers will either need to create a new Layer0 instance and migrate to it or destroy and re-create their Layer0 instance in these circumstances.\n\n\n\n\nRun the \nupgrade\n command, replacing \ninstance_name\n and \nversion\n with the name of the Layer0 instance and new version, respectively:\n\n\nl0-setup upgrade \ninstance_name\n \nversion\n\n\n\n\n\n\nThis will prompt you about the updated \nsource\n and \nversion\n inputs changing. \nIf you are not satisfied with the changes, exit the application during the prompts. \nFor full control on changing inputs, use the \nset\n command. \n\n\nExample Usage\n\n\nl0-setup upgrade mylayer0 v0.10.1\n\nThis will update the \nversion\n input\n From: [v0.10.0]\n To: [v0.10.1]\n\n Press \nenter\n to accept this change:\nThis will update the \nsource\n input\n From: [github.com/quintilesims/layer0//setup/module?ref=v0.10.0]\n To: [github.com/quintilesims/layer0//setup/module?ref=v0.10.1]\n\n Press \nenter\n to accept this change:\n ...\n\nEverything looks good! 
You are now ready to run \nl0-setup apply mylayer0\n\n\n\n\n\n\nAs stated by the command output, run the \napply\n command to apply the changes to the Layer0 instance.\nIf any errors occur, please contact the Layer0 team.", + "title": "Upgrade" + }, + { + "location": "/setup/upgrade/#upgrade-a-layer0-instance", + "text": "This section provides procedures for upgrading your Layer0 installation to the latest version.\nThis assumes you are using Layer0 version v0.10.0 or later. Warning Layer0 does not support updating MAJOR or MINOR versions in place unless explicitly stated otherwise.\nUsers will either need to create a new Layer0 instance and migrate to it or destroy and re-create their Layer0 instance in these circumstances. Run the upgrade command, replacing instance_name and version with the name of the Layer0 instance and new version, respectively: l0-setup upgrade instance_name version This will prompt you about the updated source and version inputs changing. \nIf you are not satisfied with the changes, exit the application during the prompts. \nFor full control on changing inputs, use the set command. Example Usage l0-setup upgrade mylayer0 v0.10.1\n\nThis will update the version input\n From: [v0.10.0]\n To: [v0.10.1]\n\n Press enter to accept this change:\nThis will update the source input\n From: [github.com/quintilesims/layer0//setup/module?ref=v0.10.0]\n To: [github.com/quintilesims/layer0//setup/module?ref=v0.10.1]\n\n Press enter to accept this change:\n ...\n\nEverything looks good! You are now ready to run l0-setup apply mylayer0 As stated by the command output, run the apply command to apply the changes to the Layer0 instance.\nIf any errors occur, please contact the Layer0 team.", + "title": "Upgrade a Layer0 Instance" + }, + { + "location": "/setup/destroy/", + "text": "Destroying a Layer0 Instance\n#\n\n\nThis section provides procedures for destroying (deleting) a Layer0 instance.\n\n\nPart 1: Clean Up Your Layer0 Environments\n#\n\n\nIn order to destroy a Layer0 instance, you must first delete all environments in the instance.\nList all environments with:\n\n\nl0 environment list\n\n\n\n\n\nFor each environment listed in the previous step, with the exception of the environment named \napi\n, \nissue the following command (replacing \nenvironment_name\n with the name of the environment to delete):\n\n\nl0 environment delete --wait \nenvironment_name\n\n\n\n\n\n\nPart 2: Destroy the Layer0 Instance\n#\n\n\nOnce all environments have been deleted, the Layer0 instance can be deleted using the \nl0-setup\n tool. \nRun the following command (replacing \ninstance_name\n with the name of the Layer0 instance):\n\n\nl0-setup destroy \ninstance_name\n\n\n\n\n\n\nThe \ndestroy\n command is idempotent; if it fails, it is safe to re-attempt multiple times. \n\n\n\n\nNote\n\n\nIf the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources \nl0-setup\n is attempting to destroy. 
You will need to manually remove these dependencies in order to get the \ndestroy\n command to complete successfully.", + "title": "Destroy" + }, + { + "location": "/setup/destroy/#destroying-a-layer0-instance", + "text": "This section provides procedures for destroying (deleting) a Layer0 instance.", + "title": "Destroying a Layer0 Instance" + }, + { + "location": "/setup/destroy/#part-1-clean-up-your-layer0-environments", + "text": "In order to destroy a Layer0 instance, you must first delete all environments in the instance.\nList all environments with: l0 environment list For each environment listed in the previous step, with the exception of the environment named api , \nissue the following command (replacing environment_name with the name of the environment to delete): l0 environment delete --wait environment_name", + "title": "Part 1: Clean Up Your Layer0 Environments" + }, + { + "location": "/setup/destroy/#part-2-destroy-the-layer0-instance", + "text": "Once all environments have been deleted, the Layer0 instance can be deleted using the l0-setup tool. \nRun the following command (replacing instance_name with the name of the Layer0 instance): l0-setup destroy instance_name The destroy command is idempotent; if it fails, it is safe to re-attempt multiple times. Note If the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources l0-setup is attempting to destroy. You will need to manually remove these dependencies in order to get the destroy command to complete successfully.", + "title": "Part 2: Destroy the Layer0 Instance" + }, + { + "location": "/guides/walkthrough/intro/", + "text": "An Iterative Walkthrough\n#\n\n\nThis guide aims to take you through three increasingly-complex deployment examples using Layer0.\nSuccessive sections build upon the previous ones, and each deployment can be completed either through the Layer0 CLI directly, or through Terraform using our custom \nLayer0 Terraform Provider\n.\n\n\nWe assume that you're using Layer0 v0.9.0 or later.\nIf you have not already installed and configured Layer0, see the \ninstallation guide\n.\nIf you are running an older version of Layer0, you may need to \nupgrade\n.\n\n\nIf you intend to deploy services using the Layer0 Terraform Provider, you'll want to make sure that you've \ninstalled\n the provider correctly.\n\n\nRegardless of the deployment method you choose, we maintain a \nguides repository\n that you should clone/download.\nIt contains all the files you will need to progress through this walkthrough.\nAs you do so, we will assume that your working directory matches the part of the guide that you're following (for example, Deployment 1 of this guide will assume that your working directory is \n.../walkthrough/deployment-1/\n).\n\n\nTable of Contents\n:\n\n\n\n\nDeployment 1\n: Deploying a web service (Guestbook)\n\n\nDeployment 2\n: Deploying Guestbook and a data store service (Redis)\n\n\nDeployment 3\n: Deploying Guestbook, Redis, and a service discovery service (Consul)", + "title": "Walkthrough: Introduction" + }, + { + "location": "/guides/walkthrough/intro/#an-iterative-walkthrough", + "text": "This guide aims to take you through three increasingly-complex deployment examples using Layer0.\nSuccessive sections build upon the previous ones, and each deployment can be completed either through the Layer0 CLI directly, or through Terraform using our custom Layer0 Terraform Provider . 
We assume that you're using Layer0 v0.9.0 or later.\nIf you have not already installed and configured Layer0, see the installation guide .\nIf you are running an older version of Layer0, you may need to upgrade . If you intend to deploy services using the Layer0 Terraform Provider, you'll want to make sure that you've installed the provider correctly. Regardless of the deployment method you choose, we maintain a guides repository that you should clone/download.\nIt contains all the files you will need to progress through this walkthrough.\nAs you do so, we will assume that your working directory matches the part of the guide that you're following (for example, Deployment 1 of this guide will assume that your working directory is .../walkthrough/deployment-1/ ). Table of Contents : Deployment 1 : Deploying a web service (Guestbook) Deployment 2 : Deploying Guestbook and a data store service (Redis) Deployment 3 : Deploying Guestbook, Redis, and a service discovery service (Consul)", + "title": "An Iterative Walkthrough" + }, + { + "location": "/guides/walkthrough/deployment-1/", + "text": "Deployment 1: A Simple Guestbook App\n#\n\n\nIn this section you'll learn how different Layer0 commands work together to deploy applications to the cloud.\nThe example application in this section is a guestbook -- a web application that acts as a simple message board.\nYou can choose to complete this section using either \nthe Layer0 CLI\n or \nTerraform\n.\n\n\n\n\nDeploy with Layer0 CLI\n#\n\n\nIf you're following along, you'll want to be working in the \nwalkthrough/deployment-1/\n directory of your clone of the \nguides\n repo.\n\n\nFiles used in this deployment:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nGuestbook.Dockerrun.aws.json\n\n\nTemplate for running the Guestbook application\n\n\n\n\n\n\n\n\n\n\nPart 1: Create the Environment\n#\n\n\nThe first step in deploying an application with Layer0 is to create an environment.\nAn environment is a dedicated space in which one or more services can reside.\nHere, we'll create a new environment named \ndemo-env\n.\nAt the command prompt, execute the following:\n\n\nl0 environment create demo-env\n\n\nWe should see output like the following:\n\n\nENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\n\n\n\n\n\nWe can inspect our environments in a couple of different ways:\n\n\n\n\nl0 environment list\n will give us a brief summary of all environments:\n\n\n\n\nENVIRONMENT ID ENVIRONMENT NAME\ndemo00e6aa9 demo-env\napi api\n\n\n\n\n\n\n\nl0 environment get demo-env\n will show us more information about the \ndemo-env\n environment we just created:\n\n\n\n\nENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\n\n\n\n\n\n\n\nl0 environment get \\*\n illustrates wildcard matching (you could also have used \ndemo*\n in the above command), and it will return detailed information for \neach\n environment, not just one - it's like a detailed \nlist\n:\n\n\n\n\nENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\napi api 2 m3.medium\n\n\n\n\n\n\n\nPart 2: Create the Load Balancer\n#\n\n\nIn order to expose a web application to the public internet, we need to create a load balancer.\nA load balancer listens for web traffic at a specific address and directs that traffic to a Layer0 service.\n\n\nA load balancer also has a notion of a health check - a way to assess whether or not the service is healthy and 
running properly.\nBy default, Layer0 configures the health check of a load balancer based upon a simple TCP ping to port 80 every thirty seconds.\nAlso by default, this ping will time out after five seconds of no response from the service, and two consecutive successes or failures are required for the service to be considered healthy or unhealthy.\n\n\nHere, we'll create a new load balancer named \nguestbook-lb\n inside of our environment named \ndemo-env\n.\nThe load balancer will listen on port 80, and forward that traffic along to port 80 in the Docker container using the HTTP protocol.\nSince the port configuration is already aligned with the default health check, we don't need to specify any health check configuration when we create this load balancer.\nAt the command prompt, execute the following:\n\n\nl0 loadbalancer create --port 80:80/http demo-env guestbook-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env 80:80/HTTP true\n\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nloadbalancer create\n: creates a new load balancer\n\n\n--port 80:80/HTTP\n: instructs the load balancer to forward requests from port 80 on the load balancer to port 80 in the EC2 instance using the HTTP protocol\n\n\ndemo-env\n: the name of the environment in which you are creating the load balancer\n\n\nguestbook-lb\n: a name for the load balancer itself\n\n\n\n\nYou can inspect load balancers in the same way that you inspected environments in Part 1.\nTry running the following commands to get an idea of the information available to you:\n\n\n\n\nl0 loadbalancer list\n\n\nl0 loadbalancer get guestbook-lb\n\n\nl0 loadbalancer get gues*\n\n\nl0 loadbalancer get \\*\n\n\n\n\n\n\nNote\n\n\nNotice that the load balancer \nlist\n and \nget\n outputs list an \nENVIRONMENT\n field - if you ever have load balancers (or other Layer0 entities) with the same name but in different environments, you can target a specific load balancer by qualifying it with its environment name:\n\n\n`l0 loadbalancer get demo-env:guestbook-lb`\n\n\n\n\n\n\nPart 3: Deploy the ECS Task Definition\n#\n\n\nThe \ndeploy\n command is used to specify the ECS task definition that outlines a web application.\nA deploy, once created, can be applied to multiple services - even across different environments!\n\n\nHere, we'll create a new deploy called \nguestbook-dpl\n that refers to the \nGuestbook.Dockerrun.aws.json\n file found in the guides repository.\nAt the command prompt, execute the following:\n\n\nl0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.1 guestbook-dpl 1\n\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\ndeploy create\n: creates a new deployment and allows you to specify an ECS task definition\n\n\nGuestbook.Dockerrun.aws.json\n: the file name of the ECS task definition (use the full path of the file if it is not in your current working directory)\n\n\nguestbook-dpl\n: a name for the deploy, which you will use later when you create the service\n\n\n\n\n\n\nDeploy Versioning\n\n\nThe \nDEPLOY NAME\n and \nVERSION\n are combined to create a unique identifier for a deploy.\nIf you create additional deploys named \nguestbook-dpl\n, they will be assigned different version numbers.\n\n\nYou can always specify the latest version when targeting a 
deploy by using \ndeploy name\n:latest\n -- for example, \nguestbook-dpl:latest\n.\n\n\n\n\nDeploys support the same methods of inspection as environments and load balancers:\n\n\n\n\nl0 deploy list\n\n\nl0 deploy get guestbook*\n\n\nl0 deploy get guestbook:1\n\n\nl0 deploy get guestbook:latest\n\n\nl0 deploy get \\*\n\n\n\n\n\n\nPart 4: Create the Service\n#\n\n\nThe final stage of the deployment process involves using the \nservice\n command to create a new service and associate it with the environment, load balancer, and deploy that we created in the previous sections.\nThe service will execute the Docker containers which have been described in the deploy.\n\n\nHere, we'll create a new service called \nguestbook-svc\n. At the command prompt, execute the following:\n\n\nl0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest\n\n\nWe should see output like the following:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1\n\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nservice create\n: creates a new service\n\n\n--loadbalancer demo-env:guestbook-lb\n: the fully-qualified name of the load balancer; in this case, the load balancer named \nguestbook-lb\n in the environment named \ndemo-env\n. \n\n\n(It is not strictly necessary to use the fully qualified name of the load balancer, unless another load balancer with exactly the same name exists in a different environment.)\n\n\n\n\n\n\ndemo-env\n: the name of the environment you created in Part 1\n\n\nguestbook-svc\n: a name for the service you are creating\n\n\nguestbook-dpl\n: the name of the deploy that you created in Part 3\n\n\n\n\nLayer0 services can be queried using the same \nget\n and \nlist\n commands that we've come to expect by now.\n\n\n\n\nCheck the Status of the Service\n#\n\n\nAfter a service has been created, it may take several minutes for that service to completely finish deploying.\nA service's status may be checked by using the \nservice get\n command.\n\n\nLet's take a peek at our \nguestbook-svc\n service.\nAt the command prompt, execute the following:\n\n\nl0 service get demo-env:guestbook-svc\n\n\nIf we're quick enough, we'll be able to see the first stage of the process (this is what was output after running the \nservice create\n command up in Part 4).\nWe should see an asterisk (*) next to the name of the \nguestbook-dpl:1\n deploy, which indicates that the service is in a transitional state:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1\n\n\n\n\n\nIn the next phase of deployment, if we execute the \nservice get\n command again, we will see \n(1)\n in the \nScale\n column; this indicates that 1 copy of the service is transitioning to an active state:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 (1)\n\n\n\n\n\nIn the final phase of deployment, we will see \n1/1\n in the \nScale\n column; this indicates that the service is running 1 copy:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1 1/1\n\n\n\n\n\n\n\nGet the Application's URL\n#\n\n\nOnce the service has been completely deployed, we can obtain the URL for the application and launch it in a browser.\n\n\nAt 
the command prompt, execute the following:\n\n\nl0 loadbalancer get demo-env:guestbook-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true \nurl\n\n\n\n\n\n\nCopy the value shown in the \nURL\n column and paste it into a web browser.\nThe guestbook application will appear (once the service has completely finished deploying).\n\n\n\n\nLogs\n#\n\n\nOutput from a Service's docker containers may be acquired by running the following command:\n\n\nl0 service logs \nSERVICE\n\n\n\n\n\n\n\n\nCleanup\n#\n\n\nIf you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application.\n\n\nl0 environment delete demo-env\n\n\nHowever, if you intend to continue through \nDeployment 2\n, you will want to keep the resources you made in this section.\n\n\n\n\nDeploy with Terraform\n#\n\n\nInstead of using the Layer0 CLI directly, you can use our Terraform provider, and deploy using Terraform \n(\nlearn more\n)\n.\nYou can use Terraform with Layer0 and AWS to create \"fire-and-forget\" deployments for your applications.\n\n\nIf you're following along, you'll want to be working in the \nwalkthrough/deployment-1/\n directory of your clone of the \nguides\n repo.\n\n\nWe use these files to set up a Layer0 environment with Terraform:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nmain.tf\n\n\nProvisions resources; populates resources in template files\n\n\n\n\n\n\noutputs.tf\n\n\nValues that Terraform will yield during deployment\n\n\n\n\n\n\nterraform.tfstate\n\n\nTracks status of deployment \n(created and managed by Terraform)\n\n\n\n\n\n\nterraform.tfvars\n\n\nVariables specific to the environment and application(s)\n\n\n\n\n\n\nvariables.tf\n\n\nValues that Terraform will use during deployment\n\n\n\n\n\n\n\n\n*.tf\n: A Brief Aside\n#\n\n\nLet's take a moment to discuss the \n.tf\n files.\nThe names of these files (and even the fact that they are separated out into multiple files at all) are completely arbitrary and exist solely for human-readability.\nTerraform understands all \n.tf\n files in a directory together.\n\n\nIn \nvariables.tf\n, you'll see \n\"endpoint\"\n and \n\"token\"\n variables.\n\n\nIn \noutputs.tf\n, you'll see that Terraform should spit out the url of the guestbook's load balancer once deployment has finished.\n\n\nIn \nmain.tf\n, you'll see the bulk of the deployment process.\nIf you've followed along with the Layer0 CLI deployment above, it should be fairly easy to see how blocks in this file map to steps in the CLI process.\nWhen we began the CLI deployment, our first step was to create an environment:\n\n\nl0 environment create demo-env\n\n\nThis command is recreated in \nmain.tf\n like so:\n\n\n# walkthrough/deployment-1/main.tf\n\nresource \nlayer0_environment\n \ndemo-env\n {\n name = \ndemo-env\n\n}\n\n\n\n\n\nWe've bundled up the heart of the Guestbook deployment (load balancer, deploy, service, etc.) 
into a \nTerraform module\n.\nTo use it, we declare a \nmodule\n block and pass in the source of the module as well as any configuration or variables that the module needs.\n\n\n# walkthrough/deployment-1/main.tf\n\nmodule \nguestbook\n {\n source = \ngithub.com/quintilesims/guides//guestbook/module\n\n environment_id = \n${\nlayer0_environment\n.\ndemo\n.\nid\n}\n\n}\n\n\n\n\n\nYou can see that we pass in the ID of the environment we create.\nAll variables declared in this block are passed to the module, so the next file we should look at is \nvariables.tf\n inside of the module to get an idea of what the module is expecting.\n\n\nThere are a lot of variables here, but only one of them doesn't have a default value.\n\n\n# guestbook/module/variables.tf\n\nvariable \nenvironment_id\n {\n description = \nid of the layer0 environment in which to create resources\n\n}\n\n\n\n\n\nYou'll notice that this is the variable that we're passing in.\nFor this particular deployment of the Guestbook, all of the default options are fine.\nWe could override any of them if we wanted to, just by specifying a new value for them back in \ndeployment-1/main.tf\n.\n\n\nNow that we've seen the variables that the module will have, let's take a look at part of \nmodule/main.tf\n and see how some of them might be used:\n\n\n# guestbook/module/main.tf\n\nresource \nlayer0_load_balancer\n \nguestbook-lb\n {\n name = \n${\nvar\n.\nload_balancer_name\n}\n\n environment = \n${\nvar\n.\nenvironment_id\n}\n\n port {\n host_port = 80\n container_port = 80\n protocol = \nhttp\n\n }\n}\n\n...\n\n\n\n\n\nYou can follow \nthis link\n to learn more about Layer0 resources in Terraform.\n\n\n\n\nPart 1: Terraform Get\n#\n\n\nThis deployment uses modules, so we'll need to fetch those source materials.\nAt the command prompt, execute the following command:\n\n\nterraform get\n\n\nWe should see output like the following:\n\n\nGet\n:\n \ngit\n::\nhttps\n://\ngithub\n.\ncom\n/quintilesims/g\nuides\n.\ngit\n\n\n\n\n\n\nWe should now have a new local directory called \n.terraform/\n.\nWe don't need to do anything with it; we just want to make sure it's there.\n\n\n\n\nPart 2: Terraform Init\n#\n\n\nThis deployment has provider dependencies, so an init call must be made. \n(Terraform v0.11+ requires init)\nAt the command prompt, execute the following command:\n\n\nterraform init\n\n\nWe should see output like the following:\n\n\nInitializing modules...\n- module.guestbook\n\nInitializing provider plugins...\n- Checking for available provider plugins on https://releases.hashicorp.com...\n- Downloading plugin for provider \ntemplate\n (1.0.0)...\n\nThe following providers do not have any version constraints in configuration,\nso the latest version was installed.\n\nTo prevent automatic upgrades to new major versions that may contain breaking\nchanges, it is recommended to add version = \n...\n constraints to the\ncorresponding provider blocks in configuration, with the constraint strings\nsuggested below.\n\n* provider.template: version = \n~\n 1.0\n\n\nTerraform has been successfully initialized!\n\nYou may now begin working with Terraform. Try running \nterraform plan\n to see\nany changes that are required for your infrastructure. All Terraform commands\nshould now work.\n\nIf you ever set or change modules or backend configuration for Terraform,\nrerun this command to reinitialize your working directory. 
If you forget, other\ncommands will detect it and remind you to do so if necessary.\n\n\n\n\n\n\n\nPart 3: Terraform Plan\n#\n\n\nBefore we actually create/update/delete any resources, it's a good idea to find out what Terraform intends to do.\n\n\nRun \nterraform plan\n. Terraform will prompt you for configuration values that it does not have:\n\n\nvar.endpoint\n Enter a value:\n\nvar.token\n Enter a value:\n\n\n\n\n\nYou can find these values by running \nl0-setup endpoint \nyour layer0 prefix\n.\n\n\n\n\nNote\n\n\nThere are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the \nterraform.tfvars\n file, or exporting evironment variables like \nTF_VAR_endpoint\n and \nTF_VAR_token\n, for example). See the \nTerraform Docs\n for more.\n\n\n\n\nThe \nplan\n command should give us output like the following:\n\n\nRefreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.guestbook: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. Cyan entries are data sources to be read.\n\nNote: You didn\nt specify an \n-out\n parameter to save this plan, so when\n\napply\n is called, Terraform can\nt guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: \ncomputed\n\n cluster_count: \ncomputed\n\n links: \ncomputed\n\n name: \ndemo\n\n os: \nlinux\n\n security_group_id: \ncomputed\n\n size: \nm3.medium\n\n\n+ module.guestbook.layer0_deploy.guestbook\n content: \n{\\n \\\nAWSEBDockerrunVersion\\\n: 2,\\n \\\ncontainerDefinitions\\\n: [\\n {\\n \\\nname\\\n: \\\nguestbook\\\n,\\n \\\nimage\\\n: \\\nquintilesims/guestbook\\\n,\\n \\\nessential\\\n: true,\\n \\\nmemory\\\n: 128,\\n \\\nenvironment\\\n: [\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_TYPE\\\n,\\n \\\nvalue\\\n: \\\nmemory\\\n\\n },\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_CONFIG\\\n,\\n \\\nvalue\\\n: \\\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_ACCESS_KEY_ID\\\n,\\n \\\nvalue\\\n: \\\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_SECRET_ACCESS_KEY\\\n,\\n \\\nvalue\\\n: \\\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_REGION\\\n,\\n \\\nvalue\\\n: \\\nus-west-2\\\n\\n }\\n ],\\n \\\nportMappings\\\n: [\\n {\\n \\\nhostPort\\\n: 80,\\n \\\ncontainerPort\\\n: 80\\n }\\n ]\\n }\\n ]\\n}\\n\n\n name: \nguestbook\n\n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: \n${\nvar\n.\nenvironment_id\n}\n\n health_check.#: \ncomputed\n\n name: \nguestbook\n\n port.#: \n1\n\n port.2027667003.certificate: \n\n port.2027667003.container_port: \n80\n\n port.2027667003.host_port: \n80\n\n port.2027667003.protocol: \nhttp\n\n url: \ncomputed\n\n\n+ module.guestbook.layer0_service.guestbook\n deploy: \n${\n \nvar\n.\ndeploy_id\n \n==\n \\\n\\\n ? 
layer0_deploy.guestbook.id : var.deploy_id \n}\n\n environment: \n${\nvar\n.\nenvironment_id\n}\n\n load_balancer: \n${\nlayer0_load_balancer\n.\nguestbook\n.\nid\n}\n\n name: \nguestbook\n\n scale: \n1\n\n wait: \ntrue\n\n\n\nPlan: 4 to add, 0 to change, 0 to destroy.\n\n\n\n\n\nThis shows you that Terraform intends to create a deploy, an environment, a load balancer, and a service, all through Layer0.\n\n\nIf you've gone through this deployment using the \nLayer0 CLI\n, you may notice that these resources appear out of order - that's fine. Terraform presents these resources in alphabetical order, but underneath, it knows the correct order in which to create them.\n\n\nOnce we're satisfied that Terraform will do what we want it to do, we can move on to actually making these things exist!\n\n\n\n\nPart 4: Terraform Apply\n#\n\n\nRun \nterraform apply\n to begin the process.\n\n\nWe should see output like the following:\n\n\nlayer0_environment.demo: Refreshing state...\n...\n...\n...\nlayer0_service.guestbook: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = \nhttp endpoint for the sample application\n\n\n\n\n\n\n\n\nNote\n\n\nIt may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.\n\n\n\n\nWhat's Happening\n#\n\n\nTerraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment. Terraform also writes the state of your deployment to the \nterraform.tfstate\n file (creating a new one if it's not already there).\n\n\nCleanup\n#\n\n\nWhen you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application. Execute the following command (in the same directory):\n\n\nterraform destroy\n\n\nIt's also now safe to remove the \n.terraform/\n directory and the \n*.tfstate*\n files.", + "title": "Walkthrough: Deployment 1" + }, + { + "location": "/guides/walkthrough/deployment-1/#deployment-1-a-simple-guestbook-app", + "text": "In this section you'll learn how different Layer0 commands work together to deploy applications to the cloud.\nThe example application in this section is a guestbook -- a web application that acts as a simple message board.\nYou can choose to complete this section using either the Layer0 CLI or Terraform .", + "title": "Deployment 1: A Simple Guestbook App" + }, + { + "location": "/guides/walkthrough/deployment-1/#deploy-with-layer0-cli", + "text": "If you're following along, you'll want to be working in the walkthrough/deployment-1/ directory of your clone of the guides repo. 
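If you have not cloned the guides repo yet, a minimal sketch (assuming git is installed; the repository URL is the one referenced by the Terraform sections of this guide):
# clone the guides repository and move into the first deployment directory
git clone https://github.com/quintilesims/guides.git
cd guides/walkthrough/deployment-1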
Files used in this deployment: Filename Purpose Guestbook.Dockerrun.aws.json Template for running the Guestbook application", + "title": "Deploy with Layer0 CLI" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-1-create-the-environment", + "text": "The first step in deploying an application with Layer0 is to create an environment.\nAn environment is a dedicated space in which one or more services can reside.\nHere, we'll create a new environment named demo-env .\nAt the command prompt, execute the following: l0 environment create demo-env We should see output like the following: ENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium We can inspect our environments in a couple of different ways: l0 environment list will give us a brief summary of all environments: ENVIRONMENT ID ENVIRONMENT NAME\ndemo00e6aa9 demo-env\napi api l0 environment get demo-env will show us more information about the demo-env environment we just created: ENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium l0 environment get \\* illustrates wildcard matching (you could also have used demo* in the above command), and it will return detailed information for each environment, not just one - it's like a detailed list : ENVIRONMENT ID ENVIRONMENT NAME CLUSTER COUNT INSTANCE SIZE LINKS\ndemo00e6aa9 demo-env 0 m3.medium\napi api 2 m3.medium", + "title": "Part 1: Create the Environment" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-2-create-the-load-balancer", + "text": "In order to expose a web application to the public internet, we need to create a load balancer.\nA load balancer listens for web traffic at a specific address and directs that traffic to a Layer0 service. A load balancer also has a notion of a health check - a way to assess whether or not the service is healthy and running properly.\nBy default, Layer0 configures the health check of a load balancer based upon a simple TCP ping to port 80 every thirty seconds.\nAlso by default, this ping will timeout after five seconds of no response from the service, and two consecutive successes or failures are required for the service to be considered healthy or unhealthy. 
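Expressed in classic Elastic Load Balancing terms, that default roughly corresponds to a health check of Target=TCP:80, Interval=30, Timeout=5, HealthyThreshold=2, UnhealthyThreshold=2. Layer0 manages this configuration for you, but if you ever want to inspect it directly, something like the following AWS CLI call will show it (the load balancer name is a placeholder; look up the actual ELB name in the AWS console):
# show the settings, including the health check, of the underlying classic ELB
aws elb describe-load-balancers --load-balancer-names <your-elb-name>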
Here, we'll create a new load balancer named guestbook-lb inside of our environment named demo-env .\nThe load balancer will listen on port 80, and forward that traffic along to port 80 in the Docker container using the HTTP protocol.\nSince the port configuration is already aligned with the default health check, we don't need to specify any health check configuration when we create this load balancer.\nAt the command prompt, execute the following: l0 loadbalancer create --port 80:80/http demo-env guestbook-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env 80:80/HTTP true The following is a summary of the arguments passed in the above command: loadbalancer create : creates a new load balancer --port 80:80/HTTP : instructs the load balancer to forward requests from port 80 on the load balancer to port 80 in the EC2 instance using the HTTP protocol demo-env : the name of the environment in which you are creating the load balancer guestbook-lb : a name for the load balancer itself You can inspect load balancers in the same way that you inspected environments in Part 1.\nTry running the following commands to get an idea of the information available to you: l0 loadbalancer list l0 loadbalancer get guestbook-lb l0 loadbalancer get gues* l0 loadbalancer get \\* Note Notice that the load balancer list and get outputs list an ENVIRONMENT field - if you ever have load balancers (or other Layer0 entities) with the same name but in different environments, you can target a specific load balancer by qualifying it with its environment name: `l0 loadbalancer get demo-env:guestbook-lb`", + "title": "Part 2: Create the Load Balancer" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-3-deploy-the-ecs-task-definition", + "text": "The deploy command is used to specify the ECS task definition that outlines a web application.\nA deploy, once created, can be applied to multiple services - even across different environments! Here, we'll create a new deploy called guestbook-dpl that refers to the Guestbook.Dockerrun.aws.json file found in the guides reposiory.\nAt the command prompt, execute the following: l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.1 guestbook-dpl 1 The following is a summary of the arguments passed in the above command: deploy create : creates a new deployment and allows you to specify an ECS task definition Guestbook.Dockerrun.aws.json : the file name of the ECS task definition (use the full path of the file if it is not in your current working directory) guestbook-dpl : a name for the deploy, which you will use later when you create the service Deploy Versioning The DEPLOY NAME and VERSION are combined to create a unique identifier for a deploy.\nIf you create additional deploys named guestbook-dpl , they will be assigned different version numbers. You can always specify the latest version when targeting a deploy by using deploy name :latest -- for example, guestbook-dpl:latest . 
Deploys support the same methods of inspection as environments and load balancers: l0 deploy list l0 deploy get guestbook* l0 deploy get guestbook:1 l0 deploy get guestbook:latest l0 deploy get \\*", + "title": "Part 3: Deploy the ECS Task Definition" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-4-create-the-service", + "text": "The final stage of the deployment process involves using the service command to create a new service and associate it with the environment, load balancer, and deploy that we created in the previous sections.\nThe service will execute the Docker containers which have been described in the deploy. Here, we'll create a new service called guestbook-svc . At the command prompt, execute the following: l0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest We should see output like the following: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 The following is a summary of the arguments passed in the above command: service create : creates a new service --loadbalancer demo-env:guestbook-lb : the fully-qualified name of the load balancer; in this case, the load balancer named guestbook-lb in the environment named demo-env . (It is not strictly necessary to use the fully qualified name of the load balancer, unless another load balancer with exactly the same name exists in a different environment.) demo-env : the name of the environment you created in Part 1 guestbook-svc : a name for the service you are creating guestbook-dpl : the name of the deploy that you created in Part 3 Layer0 services can be queried using the same get and list commands that we've come to expect by now.", + "title": "Part 4: Create the Service" + }, + { + "location": "/guides/walkthrough/deployment-1/#check-the-status-of-the-service", + "text": "After a service has been created, it may take several minutes for that service to completely finish deploying.\nA service's status may be checked by using the service get command. Let's take a peek at our guestbook-svc service.\nAt the command prompt, execute the following: l0 service get demo-env:guestbook-svc If we're quick enough, we'll be able to see the first stage of the process (this is what was output after running the service create command up in Part 4).\nWe should see an asterisk (*) next to the name of the guestbook-dpl:1 deploy, which indicates that the service is in a transitional state: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 In the next phase of deployment, if we execute the service get command again, we will see (1) in the Scale column; this indicates that 1 copy of the service is transitioning to an active state: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1* 0/1 (1) In the final phase of deployment, we will see 1/1 in the Scale column; this indicates that the service is running 1 copy: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo9364b guestbook-svc demo-env guestbook-lb guestbook-dpl:1 1/1", + "title": "Check the Status of the Service" + }, + { + "location": "/guides/walkthrough/deployment-1/#get-the-applications-url", + "text": "Once the service has been completely deployed, we can obtain the URL for the application and launch it in a browser. 
At the command prompt, execute the following: l0 loadbalancer get demo-env:guestbook-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nguestbodb65a guestbook-lb demo-env guestbook-svc 80:80/HTTP true url Copy the value shown in the URL column and paste it into a web browser.\nThe guestbook application will appear (once the service has completely finished deploying).", + "title": "Get the Application's URL" + }, + { + "location": "/guides/walkthrough/deployment-1/#logs", + "text": "Output from a Service's docker containers may be acquired by running the following command: l0 service logs SERVICE", + "title": "Logs" + }, + { + "location": "/guides/walkthrough/deployment-1/#cleanup", + "text": "If you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application. l0 environment delete demo-env However, if you intend to continue through Deployment 2 , you will want to keep the resources you made in this section.", + "title": "Cleanup" + }, + { + "location": "/guides/walkthrough/deployment-1/#deploy-with-terraform", + "text": "Instead of using the Layer0 CLI directly, you can use our Terraform provider, and deploy using Terraform ( learn more ) .\nYou can use Terraform with Layer0 and AWS to create \"fire-and-forget\" deployments for your applications. If you're following along, you'll want to be working in the walkthrough/deployment-1/ directory of your clone of the guides repo. We use these files to set up a Layer0 environment with Terraform: Filename Purpose main.tf Provisions resources; populates variables in template files outputs.tf Values that Terraform will yield during deployment terraform.tfstate Tracks status of deployment (created and managed by Terraform) terraform.tfvars Variables specific to the environment and application(s) variables.tf Values that Terraform will use during deployment", + "title": "Deploy with Terraform" + }, + { + "location": "/guides/walkthrough/deployment-1/#tf-a-brief-aside", + "text": "Let's take a moment to discuss the .tf files.\nThe names of these files (and even the fact that they are separated out into multiple files at all) are completely arbitrary and exist solely for human-readability.\nTerraform reads all of the .tf files in a directory together. In variables.tf , you'll see \"endpoint\" and \"token\" variables. In outputs.tf , you'll see that Terraform should spit out the url of the guestbook's load balancer once deployment has finished. In main.tf , you'll see the bulk of the deployment process.\nIf you've followed along with the Layer0 CLI deployment above, it should be fairly easy to see how blocks in this file map to steps in the CLI process.\nWhen we began the CLI deployment, our first step was to create an environment: l0 environment create demo-env This command is recreated in main.tf like so: # walkthrough/deployment-1/main.tf\n\nresource layer0_environment demo-env {\n name = demo-env \n} We've bundled up the heart of the Guestbook deployment (load balancer, deploy, service, etc.) into a Terraform module .\nTo use it, we declare a module block and pass in the source of the module as well as any configuration or variables that the module needs. # walkthrough/deployment-1/main.tf\n\nmodule guestbook {\n source = github.com/quintilesims/guides//guestbook/module \n environment_id = ${ layer0_environment . demo . 
id } \n} You can see that we pass in the ID of the environment we create.\nAll variables declared in this block are passed to the module, so the next file we should look at is variables.tf inside of the module to get an idea of what the module is expecting. There are a lot of variables here, but only one of them doesn't have a default value. # guestbook/module/variables.tf\n\nvariable environment_id {\n description = id of the layer0 environment in which to create resources \n} You'll notice that this is the variable that we're passing in.\nFor this particular deployment of the Guestbook, all of the default options are fine.\nWe could override any of them if we wanted to, just by specifying a new value for them back in deployment-1/main.tf . Now that we've seen the variables that the module will have, let's take a look at part of module/main.tf and see how some of them might be used: # guestbook/module/main.tf\n\nresource layer0_load_balancer guestbook-lb {\n name = ${ var . load_balancer_name } \n environment = ${ var . environment_id } \n port {\n host_port = 80\n container_port = 80\n protocol = http \n }\n}\n\n... You can follow this link to learn more about Layer0 resources in Terraform.", + "title": "*.tf: A Brief Aside" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-1-terraform-get", + "text": "This deployment uses modules, so we'll need to fetch those source materials.\nAt the command prompt, execute the following command: terraform get We should see output like the following: Get : git :: https :// github . com /quintilesims/g uides . git We should now have a new local directory called .terraform/ .\nWe don't need to do anything with it; we just want to make sure it's there.", + "title": "Part 1: Terraform Get" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-2-terraform-init", + "text": "This deployment has provider dependencies so an init call must be made. \n(Terraform v0.11~ requries init)\nAt the command prompt, execute the following command: terraform init We should see output like the following: Initializing modules...\n- module.guestbook\n\nInitializing provider plugins...\n- Checking for available provider plugins on https://releases.hashicorp.com...\n- Downloading plugin for provider template (1.0.0)...\n\nThe following providers do not have any version constraints in configuration,\nso the latest version was installed.\n\nTo prevent automatic upgrades to new major versions that may contain breaking\nchanges, it is recommended to add version = ... constraints to the\ncorresponding provider blocks in configuration, with the constraint strings\nsuggested below.\n\n* provider.template: version = ~ 1.0 \n\nTerraform has been successfully initialized!\n\nYou may now begin working with Terraform. Try running terraform plan to see\nany changes that are required for your infrastructure. All Terraform commands\nshould now work.\n\nIf you ever set or change modules or backend configuration for Terraform,\nrerun this command to reinitialize your working directory. If you forget, other\ncommands will detect it and remind you to do so if necessary.", + "title": "Part 2: Terraform Init" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-3-terraform-plan", + "text": "Before we actually create/update/delete any resources, it's a good idea to find out what Terraform intends to do. Run terraform plan . 
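Terraform needs values for the endpoint and token variables declared in variables.tf; if you don't supply them ahead of time, the plan step will prompt for them (as shown next). A minimal terraform.tfvars sketch with placeholder values (the real values come from l0-setup endpoint):

```hcl
# terraform.tfvars (sketch) -- placeholders, not real credentials
endpoint = "https://<your-layer0-api-endpoint>"
token    = "<your-layer0-api-token>"
```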
Terraform will prompt you for configuration values that it does not have: var.endpoint\n Enter a value:\n\nvar.token\n Enter a value: You can find these values by running l0-setup endpoint your layer0 prefix . Note There are a few ways to configure Terraform so that you don't have to keep entering these values every time you run a Terraform command (editing the terraform.tfvars file, or exporting evironment variables like TF_VAR_endpoint and TF_VAR_token , for example). See the Terraform Docs for more. The plan command should give us output like the following: Refreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.guestbook: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. Cyan entries are data sources to be read.\n\nNote: You didn t specify an -out parameter to save this plan, so when apply is called, Terraform can t guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: computed \n cluster_count: computed \n links: computed \n name: demo \n os: linux \n security_group_id: computed \n size: m3.medium \n\n+ module.guestbook.layer0_deploy.guestbook\n content: {\\n \\ AWSEBDockerrunVersion\\ : 2,\\n \\ containerDefinitions\\ : [\\n {\\n \\ name\\ : \\ guestbook\\ ,\\n \\ image\\ : \\ quintilesims/guestbook\\ ,\\n \\ essential\\ : true,\\n \\ memory\\ : 128,\\n \\ environment\\ : [\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_TYPE\\ ,\\n \\ value\\ : \\ memory\\ \\n },\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_CONFIG\\ ,\\n \\ value\\ : \\ \\ \\n },\\n {\\n \\ name\\ : \\ AWS_ACCESS_KEY_ID\\ ,\\n \\ value\\ : \\ \\ \\n },\\n {\\n \\ name\\ : \\ AWS_SECRET_ACCESS_KEY\\ ,\\n \\ value\\ : \\ \\ \\n },\\n {\\n \\ name\\ : \\ AWS_REGION\\ ,\\n \\ value\\ : \\ us-west-2\\ \\n }\\n ],\\n \\ portMappings\\ : [\\n {\\n \\ hostPort\\ : 80,\\n \\ containerPort\\ : 80\\n }\\n ]\\n }\\n ]\\n}\\n \n name: guestbook \n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: ${ var . environment_id } \n health_check.#: computed \n name: guestbook \n port.#: 1 \n port.2027667003.certificate: \n port.2027667003.container_port: 80 \n port.2027667003.host_port: 80 \n port.2027667003.protocol: http \n url: computed \n\n+ module.guestbook.layer0_service.guestbook\n deploy: ${ var . deploy_id == \\ \\ ? layer0_deploy.guestbook.id : var.deploy_id } \n environment: ${ var . environment_id } \n load_balancer: ${ layer0_load_balancer . guestbook . id } \n name: guestbook \n scale: 1 \n wait: true \n\n\nPlan: 4 to add, 0 to change, 0 to destroy. This shows you that Terraform intends to create a deploy, an environment, a load balancer, and a service, all through Layer0. If you've gone through this deployment using the Layer0 CLI , you may notice that these resources appear out of order - that's fine. Terraform presents these resources in alphabetical order, but underneath, it knows the correct order in which to create them. 
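That ordering falls out of the interpolation references inside the module: each resource refers to the IDs of the resources it needs, so Terraform can build the dependency graph itself. As a sketch, the service resource shown in the plan above is wired roughly like this (attribute values reconstructed from the plan output):

```hcl
# Reconstructed sketch of the module's service resource (see the plan output above)
resource "layer0_service" "guestbook" {
  name          = "guestbook"
  environment   = "${var.environment_id}"
  load_balancer = "${layer0_load_balancer.guestbook.id}"  # implicit dependency on the load balancer
  deploy        = "${var.deploy_id == "" ? layer0_deploy.guestbook.id : var.deploy_id}"
  scale         = 1
  wait          = true
}
```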
Once we're satisfied that Terraform will do what we want it to do, we can move on to actually making these things exist!", + "title": "Part 3: Terraform Plan" + }, + { + "location": "/guides/walkthrough/deployment-1/#part-4-terraform-apply", + "text": "Run terraform apply to begin the process. We should see output like the following: layer0_environment.demo: Refreshing state...\n...\n...\n...\nlayer0_service.guestbook: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = http endpoint for the sample application Note It may take a few minutes for the guestbook service to launch and the load balancer to become available. During that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.", + "title": "Part 4: Terraform Apply" + }, + { + "location": "/guides/walkthrough/deployment-1/#whats-happening", + "text": "Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment. Terraform also writes the state of your deployment to the terraform.tfstate file (creating a new one if it's not already there).", + "title": "What's Happening" + }, + { + "location": "/guides/walkthrough/deployment-1/#cleanup_1", + "text": "When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application. Execute the following command (in the same directory): terraform destroy It's also now safe to remove the .terraform/ directory and the *.tfstate* files.", + "title": "Cleanup" + }, + { + "location": "/guides/walkthrough/deployment-2/", + "text": "Deployment 2: Guestbook + Redis\n#\n\n\nIn this section, we're going to add some complexity to the previous deployment.\n\nDeployment 1\n saw us create a simple guestbook application which kept its data in memory.\nBut what if that ever came down, either by intention or accident?\nIt would be easy enough to redeploy it, but all of the entered data would be lost.\nWhat if we wanted to scale the application to run more than one copy?\nFor this deployment, we're going to separate the data store from the guestbook application by creating a second Layer0 service which will house a Redis database server and linking it to the first.\nYou can choose to complete this section using either \nthe Layer0 CLI\n or \nTerraform\n.\n\n\n\n\nDeploy with Layer0 CLI\n#\n\n\nFor this example, we'll be working in the \nwalkthrough/deployment-2/\n directory of the \nguides\n repo.\nWe assume that you've completed the \nLayer0 CLI\n section of Deployment 1.\n\n\nFiles used in this deployment:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nGuestbook.Dockerrun.aws.json\n\n\nTemplate for running the Guestbook application\n\n\n\n\n\n\nRedis.Dockerrun.aws.json\n\n\nTemplate for running a Redis server\n\n\n\n\n\n\n\n\n\n\nPart 1: Create the Redis Load Balancer\n#\n\n\nBoth the Guestbook service and the Redis service will live in the same Layer0 environment, so we don't need to create one like we did in the first deployment.\nWe'll start by making a load balancer behind which the Redis service will be deployed.\n\n\nThe \nRedis.Dockerrun.aws.json\n task definition file we'll use is very 
simple - it just spins up a Redis server with the default configuration, which means that it will be serving on port 6379.\nOur load balancer needs to be able to forward TCP traffic to and from this port.\nAnd since we don't want the Redis server to be exposed to the public internet, we'll put it behind a private load balancer; private load balancers only accept traffic that originates from within their own environment.\nWe'll also need to specify a non-default healthcheck target, since the load balancer won't expose port 80.\nAt the command prompt, execute the following:\n\n\nl0 loadbalancer create --port 6379:6379/tcp --private --healthcheck-target tcp:6379 demo-env redis-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env 6378:6379:TCP false\n\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\nloadbalancer create\n: creates a new load balancer\n\n\n--port 6379:6379/TCP\n: instructs the load balancer to forward requests from port 6379 on the load balancer to port 6379 in the EC2 instance using the TCP protocol\n\n\n--private\n: instructs the load balancer to ignore external traffic\n\n\n--healthcheck-target tcp:6379\n: instructs the load balancer to check the health of the service via TCP pings to port 6379\n\n\ndemo-env\n: the name of the environment in which the load balancer is being created\n\n\nredis-lb\n: a name for the load balancer itself\n\n\n\n\n\n\nPart 2: Deploy the ECS Task Definition\n#\n\n\nHere, we just need to create the deploy using the \nRedis.Dockerrun.aws.json\n task definition file.\nAt the command prompt, execute the following:\n\n\nl0 deploy create Redis.Dockerrun.aws.json redis-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nredis-dpl.1 redis-dpl 1\n\n\n\n\n\nThe following is a summary of the arguments passed in the above command:\n\n\n\n\ndeploy create\n: creates a new Layer0 Deploy and allows you to specify an ECS task definition\n\n\nRedis.Dockerrun.aws.json\n: the file name of the ECS task definition (use the full path of the file if it is not in your current working directory)\n\n\nredis-dpl\n: a name for the deploy, which we will use later when we create the service\n\n\n\n\n\n\nPart 3: Create the Redis Service\n#\n\n\nHere, we just need to pull the previous resources together into a service.\nAt the command prompt, execute the following:\n\n\nl0 service create --wait --loadbalancer demo-env:redis-lb demo-env redis-svc redis-dpl:latest\n\n\nWe should see output like the following:\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nredislb16ae6 redis-svc demo-env redis-lb redis-dpl:1 0/1\n\n\n\n\n\nThe following is a summary of the arguments passed in the above commands:\n\n\n\n\nservice create\n: creates a new Layer0 Service\n\n\n--wait\n: instructs the CLI to keep hold of the shell until the service has been successfully deployed\n\n\n--loadbalancer demo-env:redis-lb\n: the fully-qualified name of the load balancer; in this case, the load balancer named \nredis-lb\n in the environment named \ndemo-env\n\n\n(Again, it's not strictly necessary to use the fully-qualified name of the load balancer as long as there isn't another load balancer with the same name in a different environment)\n\n\n\n\n\n\ndemo-env\n: the name of the environment in which the service is to reside\n\n\nredis-svc\n: a name for the service we're creating\n\n\nredis-dpl:latest\n: the 
name of the deploy the service will put into action\n\n\n(We use \n:\n to specify which deploy we want - \n:latest\n will always give us the most recently-created one.)\n\n\n\n\n\n\n\n\n\n\nPart 4: Check the Status of the Redis Service\n#\n\n\nAs in the first deployment, we can keep an eye on our service by using the \nservice get\n command:\n\n\nl0 service get redis-svc\n\n\nOnce the service has finished scaling, try looking at the service's logs to see the output that the Redis server creates:\n\n\nl0 service logs redis-svc\n\n\nAmong some warnings and information not important to this exercise and a fun bit of ASCII art, you should see something like the following:\n\n\n... # words and ASCII art\n1:M 05 Apr 23:29:47.333 * The server is now ready to accept connections on port 6379\n\n\n\n\n\nNow we just need to teach the Guestbook application how to talk with our Redis service.\n\n\n\n\nPart 5: Update the Guestbook Deploy\n#\n\n\nYou should see in \nwalkthrough/deployment-2/\n another \nGuestbook.Dockerrun.aws.json\n file.\nThis file is very similar to but not the same as the one in \ndeployment-1/\n - if you open it up, you can see the following additions:\n\n\n ...\n \nenvironment\n: [\n {\n \nname\n: \nGUESTBOOK_BACKEND_TYPE\n,\n \nvalue\n: \nredis\n\n },\n {\n \nname\n: \nGUESTBOOK_BACKEND_CONFIG\n,\n \nvalue\n: \nredis host and port here\n\n }\n ],\n ...\n\n\n\n\n\nThe \n\"GUESTBOOK_BACKEND_CONFIG\"\n variable is what will point the Guestbook application towards the Redis server.\nThe \nredis host and port here\n section needs to be replaced and populated in the following format:\n\n\nvalue\n: \nADDRESS_OF_REDIS_SERVER:PORT_THE_SERVER_IS_SERVING_ON\n\n\n\n\n\n\nWe already know that Redis is serving on port 6379, so let's go find the server's address.\nRemember, it lives behind a load balancer that we made, so run the following command:\n\n\nl0 loadbalancer get redis-lb\n\n\nWe should see output like the following:\n\n\nLOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env redis-svc 6379:6379/TCP false internal-l0-\nyadda-yadda\n.elb.amazonaws.com\n\n\n\n\n\nCopy that \nURL\n value, replace \nredis host and port here\n with the \nURL\n value in \nGuestbook.Dockerrun.aws.json\n, append \n:6379\n to it, and save the file.\nIt should look something like the following:\n\n\n ...\n \nenvironment\n: [\n {\n \nname\n: \nGUESTBOOK_BACKEND_CONFIG\n,\n \nvalue\n: \ninternal-l0-\nyadda-yadda\n.elb.amazonaws.com:6379\n\n }\n ],\n ...\n\n\n\n\n\nNow, we can create an updated deploy:\n\n\nl0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl\n\n\nWe should see output like the following:\n\n\nDEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.2 guestbook-dpl 2\n\n\n\n\n\n\n\nPart 6: Update the Guestbook Service\n#\n\n\nAlmost all the pieces are in place!\nNow we just need to apply the new Guestbook deploy to the running Guestbook service:\n\n\nl0 service update guestbook-svc guestbook-dpl:latest\n\n\nAs the Guestbook service moves through the phases of its update process, we should see outputs like the following (if we keep an eye on the service with \nl0 service get guestbook-svc\n, that is):\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2* 1/1\n guestbook-dpl:1\n\n\n\n\n\nabove: \nguestbook-dpl:2\n is in a transitional state\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 
2/1\n guestbook-dpl:1\n\n\n\n\n\nabove: both versions of the deployment are running at scale\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1\n guestbook-dpl:1*\n\n\n\n\n\nabove: \nguestbook-dpl:1\n is in a transitional state\n\n\nSERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1\n\n\n\n\n\nabove: \nguestbook-dpl:1\n has been removed, and only \nguestbook-dpl:2\n remains\n\n\n\n\nPart 7: Prove It\n#\n\n\nYou should now be able to point your browser at the URL for the Guestbook load balancer (run \nl0 loadbalancer get guestbook-lb\n to find it) and see what looks like the same Guestbook application you deployed in the first section of the walkthrough.\nGo ahead and add a few entries, make sure it's functioning properly.\nWe'll wait.\n\n\nNow, let's prove that we've actually separated the data from the application by deleting and redeploying the Guestbook application:\n\n\nl0 service delete --wait guestbook-svc\n\n\n(We'll leave the \ndeploy\n intact so we can spin up a new service easily, and we'll leave the environment untouched because it also contained the Redis server.\nWe'll also pass the \n--wait\n flag so that we don't need to keep checking on the status of the job to know when it's complete.)\n\n\nOnce those resources have been deleted, we can recreate them!\n\n\nCreate another service, using the \nguestbook-dpl\n deploy we kept around:\n\n\nl0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest\n\n\nWait for everything to spin up, and hit that new load balancer's url (\nl0 loadbalancer get guestbook-lb\n) with your browser.\nYour data should still be there!\n\n\n\n\nCleanup\n#\n\n\nIf you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application.\n\n\nl0 environment delete demo-env\n\n\nHowever, if you intend to continue through \nDeployment 3\n, you will want to keep the resources you made in this section.\n\n\n\n\nDeploy with Terraform\n#\n\n\nAs before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI. 
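The main change on the Terraform side, described in more detail below, is that main.tf now pulls in two modules: the Redis module and the Guestbook module, with the latter told to use Redis as its backend. A rough sketch (the module variable and output names here are illustrative assumptions; check the module sources for the real ones):

```hcl
# Sketch of walkthrough/deployment-2/main.tf -- names are illustrative assumptions
module "redis" {
  source         = "github.com/quintilesims/redis//terraform"
  environment_id = "${layer0_environment.demo.id}"
}

module "guestbook" {
  source         = "github.com/quintilesims/guides//guestbook/module"
  environment_id = "${layer0_environment.demo.id}"

  # Point the application at the Redis service instead of in-memory storage.
  backend_type   = "redis"
  backend_config = "${module.redis.load_balancer_url}:6379"  # hypothetical output name
}
```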
As before, we will assume that you've cloned the \nguides\n repo and are working in the \nwalkthrough/deployment-2/\n directory.\n\n\nWe'll use these files to manage our deployment with Terraform:\n\n\n\n\n\n\n\n\nFilename\n\n\nPurpose\n\n\n\n\n\n\n\n\n\n\nmain.tf\n\n\nProvisions resources; populates variables in template files\n\n\n\n\n\n\noutputs.tf\n\n\nValues that Terraform will yield during deployment\n\n\n\n\n\n\nterraform.tfstate\n\n\nTracks status of deployment \n(created and managed by Terraform)\n\n\n\n\n\n\nterraform.tfvars\n\n\nVariables specific to the environment and application(s)\n\n\n\n\n\n\nvariables.tf\n\n\nValues that Terraform will use during deployment\n\n\n\n\n\n\n\n\n\n\n*.tf\n: A Brief Aside: Revisited\n#\n\n\nNot much is changed from \nDeployment 1\n.\nIn \nmain.tf\n, we pull in a new, second module that will deploy Redis for us.\nWe maintain this module as well; you can inspect \nthe repo\n if you'd like.\n\n\nIn \nmain.tf\n where we pull in the Guestbook module, you'll see that we're supplying more values than we did last time, because we need some additional configuration to let the Guestbook application use a Redis backend instead of its default in-memory storage.\n\n\n\n\nPart 1: Terraform Get\n#\n\n\nRun \nterraform get\n to pull down the source materials Terraform will use for deployment.\nThis will create a local \n.terraform/\n directory.\n\n\n\n\nPart 2: Terraform Init\n#\n\n\nThis deployment has provider dependencies so an init call must be made. \n(Terraform v0.11~ requries init)\nAt the command prompt, execute the following command:\n\n\nterraform init\n\n\nWe should see output like the following:\n\n\nInitializing modules...\n- module.redis\n Getting source \ngithub.com/quintilesims/redis//terraform\n\n- module.guestbook\n Getting source \ngithub.com/quintilesims/guides//guestbook/module\n\n\nInitializing provider plugins...\n- Checking for available provider plugins on https://releases.hashicorp.com...\n- Downloading plugin for provider \ntemplate\n (1.0.0)...\n\nThe following providers do not have any version constraints in configuration,\nso the latest version was installed.\n\nTo prevent automatic upgrades to new major versions that may contain breaking\nchanges, it is recommended to add version = \n...\n constraints to the\ncorresponding provider blocks in configuration, with the constraint strings\nsuggested below.\n\n* provider.template: version = \n~\n 1.0\n\n\nTerraform has been successfully initialized!\n\nYou may now begin working with Terraform. Try running \nterraform plan\n to see\nany changes that are required for your infrastructure. All Terraform commands\nshould now work.\n\nIf you ever set or change modules or backend configuration for Terraform,\nrerun this command to reinitialize your working directory. 
If you forget, other\ncommands will detect it and remind you to do so if necessary.\n\n\n\n\n\n\n\nPart 3: Terraform Plan\n#\n\n\nIt's always a good idea to find out what Terraform intends to do, so let's do that:\n\n\nterraform plan\n\n\nAs before, we'll be prompted for any variables Terraform needs and doesn't have (see the note in \nDeployment 1\n for configuring Terraform variables).\nWe'll see output similar to the following:\n\n\nRefreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.redis: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. Cyan entries are data sources to be read.\n\nNote: You didn\nt specify an \n-out\n parameter to save this plan, so when\n\napply\n is called, Terraform can\nt guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: \ncomputed\n\n cluster_count: \ncomputed\n\n links: \ncomputed\n\n name: \ndemo\n\n os: \nlinux\n\n security_group_id: \ncomputed\n\n size: \nm3.medium\n\n\n+ module.redis.layer0_deploy.redis\n content: \n{\\n \\\nAWSEBDockerrunVersion\\\n: 2,\\n \\\ncontainerDefinitions\\\n: [\\n {\\n \\\nname\\\n: \\\nredis\\\n,\\n \\\nimage\\\n: \\\nredis:3.2-alpine\\\n,\\n \\\nessential\\\n: true,\\n \\\nmemory\\\n: 128,\\n \\\nportMappings\\\n: [\\n {\\n \\\nhostPort\\\n: 6379,\\n \\\ncontainerPort\\\n: 6379\\n }\\n ]\\n }\\n ]\\n}\\n\\n\n\n name: \nredis\n\n\n+ module.redis.layer0_load_balancer.redis\n environment: \n${\nvar\n.\nenvironment_id\n}\n\n health_check.#: \ncomputed\n\n name: \nredis\n\n port.#: \n1\n\n port.1072619732.certificate: \n\n port.1072619732.container_port: \n6379\n\n port.1072619732.host_port: \n6379\n\n port.1072619732.protocol: \ntcp\n\n private: \ntrue\n\n url: \ncomputed\n\n\n+ module.redis.layer0_service.redis\n deploy: \n${\n \nvar\n.\ndeploy_id\n \n==\n \\\n\\\n ? 
layer0_deploy.redis.id : var.deploy_id \n}\n\n environment: \n${\nvar\n.\nenvironment_id\n}\n\n load_balancer: \n${\nlayer0_load_balancer\n.\nredis\n.\nid\n}\n\n name: \nredis\n\n scale: \n1\n\n wait: \ntrue\n\n\n\n= module.guestbook.data.template_file.guestbook\n rendered: \ncomputed\n\n template: \n{\\n \\\nAWSEBDockerrunVersion\\\n: 2,\\n \\\ncontainerDefinitions\\\n: [\\n {\\n \\\nname\\\n: \\\nguestbook\\\n,\\n \\\nimage\\\n: \\\nquintilesims/guestbook\\\n,\\n \\\nessential\\\n: true,\\n \\\nmemory\\\n: 128,\\n \\\nenvironment\\\n: [\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_TYPE\\\n,\\n \\\nvalue\\\n: \\\n${\nbackend_type\n}\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nGUESTBOOK_BACKEND_CONFIG\\\n,\\n \\\nvalue\\\n: \\\n${\nbackend_config\n}\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_ACCESS_KEY_ID\\\n,\\n \\\nvalue\\\n: \\\n${\naccess_key\n}\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_SECRET_ACCESS_KEY\\\n,\\n \\\nvalue\\\n: \\\n${\nsecret_key\n}\n\\\n\\n },\\n {\\n \\\nname\\\n: \\\nAWS_REGION\\\n,\\n \\\nvalue\\\n: \\\n${\nregion\n}\n\\\n\\n }\\n ],\\n \\\nportMappings\\\n: [\\n {\\n \\\nhostPort\\\n: 80,\\n \\\ncontainerPort\\\n: 80\\n }\\n ]\\n }\\n ]\\n}\\n\n\n vars.%: \ncomputed\n\n\n+ module.guestbook.layer0_deploy.guestbook\n content: \n${\ndata\n.\ntemplate_file\n.\nguestbook\n.\nrendered\n}\n\n name: \nguestbook\n\n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: \n${\nvar\n.\nenvironment_id\n}\n\n health_check.#: \ncomputed\n\n name: \nguestbook\n\n port.#: \n1\n\n port.2027667003.certificate: \n\n port.2027667003.container_port: \n80\n\n port.2027667003.host_port: \n80\n\n port.2027667003.protocol: \nhttp\n\n url: \ncomputed\n\n\n+ module.guestbook.layer0_service.guestbook\n deploy: \n${\n \nvar\n.\ndeploy_id\n \n==\n \\\n\\\n ? layer0_deploy.guestbook.id : var.deploy_id \n}\n\n environment: \n${\nvar\n.\nenvironment_id\n}\n\n load_balancer: \n${\nlayer0_load_balancer\n.\nguestbook\n.\nid\n}\n\n name: \nguestbook\n\n scale: \n2\n\n wait: \ntrue\n\n\n\nPlan: 7 to add, 0 to change, 0 to destroy.\n\n\n\n\n\nWe should see that Terraform intends to add 7 new resources, some of which are for the Guestbook deployment and some of which are for the Redis deployment.\n\n\n\n\nPart 4: Terraform Apply\n#\n\n\nRun \nterraform apply\n, and we should see output similar to the following:\n\n\ndata.template_file.redis: Refreshing state...\nlayer0_deploy.redis-dpl: Creating...\n\n...\n...\n...\n\nlayer0_service.guestbook-svc: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. 
To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = \nhttp endpoint for the sample application\n\n\n\n\n\n\n\n\nNote\n\n\nIt may take a few minutes for the guestbook service to launch and the load balancer to become available.\nDuring that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.\n\n\n\n\nWhat's Happening\n#\n\n\nTerraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment.\nTerraform also writes the state of your deployment to the \nterraform.tfstate\n file (creating a new one if it's not already there).\n\n\nCleanup\n#\n\n\nWhen you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application.\nExecute the following command (in the same directory):\n\n\nterraform destroy\n\n\nIt's also now safe to remove the \n.terraform/\n directory and the \n*.tfstate*\n files.", + "title": "Walkthrough: Deployment 2" + }, + { + "location": "/guides/walkthrough/deployment-2/#deployment-2-guestbook-redis", + "text": "In this section, we're going to add some complexity to the previous deployment. Deployment 1 saw us create a simple guestbook application which kept its data in memory.\nBut what if that ever came down, either by intention or accident?\nIt would be easy enough to redeploy it, but all of the entered data would be lost.\nWhat if we wanted to scale the application to run more than one copy?\nFor this deployment, we're going to separate the data store from the guestbook application by creating a second Layer0 service which will house a Redis database server and linking it to the first.\nYou can choose to complete this section using either the Layer0 CLI or Terraform .", + "title": "Deployment 2: Guestbook + Redis" + }, + { + "location": "/guides/walkthrough/deployment-2/#deploy-with-layer0-cli", + "text": "For this example, we'll be working in the walkthrough/deployment-2/ directory of the guides repo.\nWe assume that you've completed the Layer0 CLI section of Deployment 1. Files used in this deployment: Filename Purpose Guestbook.Dockerrun.aws.json Template for running the Guestbook application Redis.Dockerrun.aws.json Template for running a Redis server", + "title": "Deploy with Layer0 CLI" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-1-create-the-redis-load-balancer", + "text": "Both the Guestbook service and the Redis service will live in the same Layer0 environment, so we don't need to create one like we did in the first deployment.\nWe'll start by making a load balancer behind which the Redis service will be deployed. 
The Redis.Dockerrun.aws.json task definition file we'll use is very simple - it just spins up a Redis server with the default configuration, which means that it will be serving on port 6379.\nOur load balancer needs to be able to forward TCP traffic to and from this port.\nAnd since we don't want the Redis server to be exposed to the public internet, we'll put it behind a private load balancer; private load balancers only accept traffic that originates from within their own environment.\nWe'll also need to specify a non-default healthcheck target, since the load balancer won't expose port 80.\nAt the command prompt, execute the following: l0 loadbalancer create --port 6379:6379/tcp --private --healthcheck-target tcp:6379 demo-env redis-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env 6378:6379:TCP false The following is a summary of the arguments passed in the above command: loadbalancer create : creates a new load balancer --port 6379:6379/TCP : instructs the load balancer to forward requests from port 6379 on the load balancer to port 6379 in the EC2 instance using the TCP protocol --private : instructs the load balancer to ignore external traffic --healthcheck-target tcp:6379 : instructs the load balancer to check the health of the service via TCP pings to port 6379 demo-env : the name of the environment in which the load balancer is being created redis-lb : a name for the load balancer itself", + "title": "Part 1: Create the Redis Load Balancer" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-2-deploy-the-ecs-task-definition", + "text": "Here, we just need to create the deploy using the Redis.Dockerrun.aws.json task definition file.\nAt the command prompt, execute the following: l0 deploy create Redis.Dockerrun.aws.json redis-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nredis-dpl.1 redis-dpl 1 The following is a summary of the arguments passed in the above command: deploy create : creates a new Layer0 Deploy and allows you to specify an ECS task definition Redis.Dockerrun.aws.json : the file name of the ECS task definition (use the full path of the file if it is not in your current working directory) redis-dpl : a name for the deploy, which we will use later when we create the service", + "title": "Part 2: Deploy the ECS Task Definition" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-3-create-the-redis-service", + "text": "Here, we just need to pull the previous resources together into a service.\nAt the command prompt, execute the following: l0 service create --wait --loadbalancer demo-env:redis-lb demo-env redis-svc redis-dpl:latest We should see output like the following: SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nredislb16ae6 redis-svc demo-env redis-lb redis-dpl:1 0/1 The following is a summary of the arguments passed in the above commands: service create : creates a new Layer0 Service --wait : instructs the CLI to keep hold of the shell until the service has been successfully deployed --loadbalancer demo-env:redis-lb : the fully-qualified name of the load balancer; in this case, the load balancer named redis-lb in the environment named demo-env (Again, it's not strictly necessary to use the fully-qualified name of the load balancer as long as there isn't another load balancer with the same name in a different environment) demo-env : the name of the environment in which the service is to reside 
redis-svc : a name for the service we're creating redis-dpl:latest : the name of the deploy the service will put into action (We use : to specify which deploy we want - :latest will always give us the most recently-created one.)", + "title": "Part 3: Create the Redis Service" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-4-check-the-status-of-the-redis-service", + "text": "As in the first deployment, we can keep an eye on our service by using the service get command: l0 service get redis-svc Once the service has finished scaling, try looking at the service's logs to see the output that the Redis server creates: l0 service logs redis-svc Among some warnings and information not important to this exercise and a fun bit of ASCII art, you should see something like the following: ... # words and ASCII art\n1:M 05 Apr 23:29:47.333 * The server is now ready to accept connections on port 6379 Now we just need to teach the Guestbook application how to talk with our Redis service.", + "title": "Part 4: Check the Status of the Redis Service" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-5-update-the-guestbook-deploy", + "text": "You should see in walkthrough/deployment-2/ another Guestbook.Dockerrun.aws.json file.\nThis file is very similar to but not the same as the one in deployment-1/ - if you open it up, you can see the following additions: ...\n environment : [\n {\n name : GUESTBOOK_BACKEND_TYPE ,\n value : redis \n },\n {\n name : GUESTBOOK_BACKEND_CONFIG ,\n value : redis host and port here \n }\n ],\n ... The \"GUESTBOOK_BACKEND_CONFIG\" variable is what will point the Guestbook application towards the Redis server.\nThe redis host and port here section needs to be replaced and populated in the following format: value : ADDRESS_OF_REDIS_SERVER:PORT_THE_SERVER_IS_SERVING_ON We already know that Redis is serving on port 6379, so let's go find the server's address.\nRemember, it lives behind a load balancer that we made, so run the following command: l0 loadbalancer get redis-lb We should see output like the following: LOADBALANCER ID LOADBALANCER NAME ENVIRONMENT SERVICE PORTS PUBLIC URL\nredislb16ae6 redis-lb demo-env redis-svc 6379:6379/TCP false internal-l0- yadda-yadda .elb.amazonaws.com Copy that URL value, replace redis host and port here with the URL value in Guestbook.Dockerrun.aws.json , append :6379 to it, and save the file.\nIt should look something like the following: ...\n environment : [\n {\n name : GUESTBOOK_BACKEND_CONFIG ,\n value : internal-l0- yadda-yadda .elb.amazonaws.com:6379 \n }\n ],\n ... 
Now, we can create an updated deploy: l0 deploy create Guestbook.Dockerrun.aws.json guestbook-dpl We should see output like the following: DEPLOY ID DEPLOY NAME VERSION\nguestbook-dpl.2 guestbook-dpl 2", + "title": "Part 5: Update the Guestbook Deploy" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-6-update-the-guestbook-service", + "text": "Almost all the pieces are in place!\nNow we just need to apply the new Guestbook deploy to the running Guestbook service: l0 service update guestbook-svc guestbook-dpl:latest As the Guestbook service moves through the phases of its update process, we should see outputs like the following (if we keep an eye on the service with l0 service get guestbook-svc , that is): SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2* 1/1\n guestbook-dpl:1 above: guestbook-dpl:2 is in a transitional state SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 2/1\n guestbook-dpl:1 above: both versions of the deployment are running at scale SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1\n guestbook-dpl:1* above: guestbook-dpl:1 is in a transitional state SERVICE ID SERVICE NAME ENVIRONMENT LOADBALANCER DEPLOYMENTS SCALE\nguestbo5fadd guestbook-svc demo-env guestbook-lb guestbook-dpl:2 1/1 above: guestbook-dpl:1 has been removed, and only guestbook-dpl:2 remains", + "title": "Part 6: Update the Guestbook Service" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-7-prove-it", + "text": "You should now be able to point your browser at the URL for the Guestbook load balancer (run l0 loadbalancer get guestbook-lb to find it) and see what looks like the same Guestbook application you deployed in the first section of the walkthrough.\nGo ahead and add a few entries, make sure it's functioning properly.\nWe'll wait. Now, let's prove that we've actually separated the data from the application by deleting and redeploying the Guestbook application: l0 service delete --wait guestbook-svc (We'll leave the deploy intact so we can spin up a new service easily, and we'll leave the environment untouched because it also contained the Redis server.\nWe'll also pass the --wait flag so that we don't need to keep checking on the status of the job to know when it's complete.) Once those resources have been deleted, we can recreate them! Create another service, using the guestbook-dpl deploy we kept around: l0 service create --loadbalancer demo-env:guestbook-lb demo-env guestbook-svc guestbook-dpl:latest Wait for everything to spin up, and hit that new load balancer's url ( l0 loadbalancer get guestbook-lb ) with your browser.\nYour data should still be there!", + "title": "Part 7: Prove It" + }, + { + "location": "/guides/walkthrough/deployment-2/#cleanup", + "text": "If you're finished with the example and don't want to continue with this walkthrough, you can instruct Layer0 to delete the environment and terminate the application. l0 environment delete demo-env However, if you intend to continue through Deployment 3 , you will want to keep the resources you made in this section.", + "title": "Cleanup" + }, + { + "location": "/guides/walkthrough/deployment-2/#deploy-with-terraform", + "text": "As before, we can complete this deployment using Terraform and the Layer0 provider instead of the Layer0 CLI. 
As before, we will assume that you've cloned the guides repo and are working in the walkthrough/deployment-2/ directory. We'll use these files to manage our deployment with Terraform: Filename Purpose main.tf Provisions resources; populates variables in template files outputs.tf Values that Terraform will yield during deployment terraform.tfstate Tracks status of deployment (created and managed by Terraform) terraform.tfvars Variables specific to the environment and application(s) variables.tf Values that Terraform will use during deployment", + "title": "Deploy with Terraform" + }, + { + "location": "/guides/walkthrough/deployment-2/#tf-a-brief-aside-revisited", + "text": "Not much is changed from Deployment 1 .\nIn main.tf , we pull in a new, second module that will deploy Redis for us.\nWe maintain this module as well; you can inspect the repo if you'd like. In main.tf where we pull in the Guestbook module, you'll see that we're supplying more values than we did last time, because we need some additional configuration to let the Guestbook application use a Redis backend instead of its default in-memory storage.", + "title": "*.tf: A Brief Aside: Revisited" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-1-terraform-get", + "text": "Run terraform get to pull down the source materials Terraform will use for deployment.\nThis will create a local .terraform/ directory.", + "title": "Part 1: Terraform Get" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-2-terraform-init", + "text": "This deployment has provider dependencies so an init call must be made. \n(Terraform v0.11~ requries init)\nAt the command prompt, execute the following command: terraform init We should see output like the following: Initializing modules...\n- module.redis\n Getting source github.com/quintilesims/redis//terraform \n- module.guestbook\n Getting source github.com/quintilesims/guides//guestbook/module \n\nInitializing provider plugins...\n- Checking for available provider plugins on https://releases.hashicorp.com...\n- Downloading plugin for provider template (1.0.0)...\n\nThe following providers do not have any version constraints in configuration,\nso the latest version was installed.\n\nTo prevent automatic upgrades to new major versions that may contain breaking\nchanges, it is recommended to add version = ... constraints to the\ncorresponding provider blocks in configuration, with the constraint strings\nsuggested below.\n\n* provider.template: version = ~ 1.0 \n\nTerraform has been successfully initialized!\n\nYou may now begin working with Terraform. Try running terraform plan to see\nany changes that are required for your infrastructure. All Terraform commands\nshould now work.\n\nIf you ever set or change modules or backend configuration for Terraform,\nrerun this command to reinitialize your working directory. 
If you forget, other\ncommands will detect it and remind you to do so if necessary.", + "title": "Part 2: Terraform Init" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-3-terraform-plan", + "text": "It's always a good idea to find out what Terraform intends to do, so let's do that: terraform plan As before, we'll be prompted for any variables Terraform needs and doesn't have (see the note in Deployment 1 for configuring Terraform variables).\nWe'll see output similar to the following: Refreshing Terraform state in-memory prior to plan...\nThe refreshed state will be used to calculate this plan, but will not be\npersisted to local or remote state storage.\n\ndata.template_file.redis: Refreshing state...\nThe Terraform execution plan has been generated and is shown below.\nResources are shown in alphabetical order for quick scanning. Green resources\nwill be created (or destroyed and then created if an existing resource\nexists), yellow resources are being changed in-place, and red resources\nwill be destroyed. Cyan entries are data sources to be read.\n\nNote: You didn t specify an -out parameter to save this plan, so when apply is called, Terraform can t guarantee this is what will execute.\n\n+ layer0_environment.demo\n ami: computed \n cluster_count: computed \n links: computed \n name: demo \n os: linux \n security_group_id: computed \n size: m3.medium \n\n+ module.redis.layer0_deploy.redis\n content: {\\n \\ AWSEBDockerrunVersion\\ : 2,\\n \\ containerDefinitions\\ : [\\n {\\n \\ name\\ : \\ redis\\ ,\\n \\ image\\ : \\ redis:3.2-alpine\\ ,\\n \\ essential\\ : true,\\n \\ memory\\ : 128,\\n \\ portMappings\\ : [\\n {\\n \\ hostPort\\ : 6379,\\n \\ containerPort\\ : 6379\\n }\\n ]\\n }\\n ]\\n}\\n\\n \n name: redis \n\n+ module.redis.layer0_load_balancer.redis\n environment: ${ var . environment_id } \n health_check.#: computed \n name: redis \n port.#: 1 \n port.1072619732.certificate: \n port.1072619732.container_port: 6379 \n port.1072619732.host_port: 6379 \n port.1072619732.protocol: tcp \n private: true \n url: computed \n\n+ module.redis.layer0_service.redis\n deploy: ${ var . deploy_id == \\ \\ ? layer0_deploy.redis.id : var.deploy_id } \n environment: ${ var . environment_id } \n load_balancer: ${ layer0_load_balancer . redis . id } \n name: redis \n scale: 1 \n wait: true = module.guestbook.data.template_file.guestbook\n rendered: computed \n template: {\\n \\ AWSEBDockerrunVersion\\ : 2,\\n \\ containerDefinitions\\ : [\\n {\\n \\ name\\ : \\ guestbook\\ ,\\n \\ image\\ : \\ quintilesims/guestbook\\ ,\\n \\ essential\\ : true,\\n \\ memory\\ : 128,\\n \\ environment\\ : [\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_TYPE\\ ,\\n \\ value\\ : \\ ${ backend_type } \\ \\n },\\n {\\n \\ name\\ : \\ GUESTBOOK_BACKEND_CONFIG\\ ,\\n \\ value\\ : \\ ${ backend_config } \\ \\n },\\n {\\n \\ name\\ : \\ AWS_ACCESS_KEY_ID\\ ,\\n \\ value\\ : \\ ${ access_key } \\ \\n },\\n {\\n \\ name\\ : \\ AWS_SECRET_ACCESS_KEY\\ ,\\n \\ value\\ : \\ ${ secret_key } \\ \\n },\\n {\\n \\ name\\ : \\ AWS_REGION\\ ,\\n \\ value\\ : \\ ${ region } \\ \\n }\\n ],\\n \\ portMappings\\ : [\\n {\\n \\ hostPort\\ : 80,\\n \\ containerPort\\ : 80\\n }\\n ]\\n }\\n ]\\n}\\n \n vars.%: computed \n\n+ module.guestbook.layer0_deploy.guestbook\n content: ${ data . template_file . guestbook . rendered } \n name: guestbook \n\n+ module.guestbook.layer0_load_balancer.guestbook\n environment: ${ var . 
environment_id } \n health_check.#: computed \n name: guestbook \n port.#: 1 \n port.2027667003.certificate: \n port.2027667003.container_port: 80 \n port.2027667003.host_port: 80 \n port.2027667003.protocol: http \n url: computed \n\n+ module.guestbook.layer0_service.guestbook\n deploy: ${ var . deploy_id == \\ \\ ? layer0_deploy.guestbook.id : var.deploy_id } \n environment: ${ var . environment_id } \n load_balancer: ${ layer0_load_balancer . guestbook . id } \n name: guestbook \n scale: 2 \n wait: true \n\n\nPlan: 7 to add, 0 to change, 0 to destroy. We should see that Terraform intends to add 7 new resources, some of which are for the Guestbook deployment and some of which are for the Redis deployment.", + "title": "Part 3: Terraform Plan" + }, + { + "location": "/guides/walkthrough/deployment-2/#part-4-terraform-apply", + "text": "Run terraform apply , and we should see output similar to the following: data.template_file.redis: Refreshing state...\nlayer0_deploy.redis-dpl: Creating...\n\n...\n...\n...\n\nlayer0_service.guestbook-svc: Creation complete\n\nApply complete! Resources: 7 added, 0 changed, 0 destroyed.\n\nThe state of your infrastructure has been saved to the path\nbelow. This state is required to modify and destroy your\ninfrastructure, so keep it safe. To inspect the complete state\nuse the `terraform show` command.\n\nState path: terraform.tfstate\n\nOutputs:\n\nguestbook_url = http endpoint for the sample application Note It may take a few minutes for the guestbook service to launch and the load balancer to become available.\nDuring that time you may get HTTP 503 errors when making HTTP requests against the load balancer URL.", + "title": "Part 4: Terraform Apply" + }, + { + "location": "/guides/walkthrough/deployment-2/#whats-happening", + "text": "Terraform provisions the AWS resources through Layer0, configures environment variables for the application, and deploys the application into a Layer0 environment.\nTerraform also writes the state of your deployment to the terraform.tfstate file (creating a new one if it's not already there).", + "title": "What's Happening" + }, + { + "location": "/guides/walkthrough/deployment-2/#cleanup_1", + "text": "When you're finished with the example, you can instruct Terraform to destroy the Layer0 environment, and terminate the application.\nExecute the following command (in the same directory): terraform destroy It's also now safe to remove the .terraform/ directory and the *.tfstate* files.", + "title": "Cleanup" + }, + { + "location": "/guides/one_off_task/", + "text": "Deployment guide: Guestbook one-off task\n#\n\n\nIn this example, you will learn how to use layer0 to run a one-off task. A task is used to run a single instance of your Task Definition and is typically a short running job that will be stopped once finished.\n\n\n\n\nBefore you start\n#\n\n\nIn order to complete the procedures in this section, you must install and configure Layer0 v0.8.4 or later. If you have not already configured Layer0, see the \ninstallation guide\n. If you are running an older version of Layer0, see the \nupgrade instructions\n.\n\n\nPart 1: Prepare the task definition\n#\n\n\n\n\nDownload the \nGuestbook One-off Task Definition\n and save it to your computer as \nDockerrun.aws.json\n.\n\n\n\n\nPart 2: Create a deploy\n#\n\n\nNext, you will create a new deploy for the task using the \ndeploy create\n command. 
At the command prompt, run the following command:\n\n\nl0 deploy create Dockerrun.aws.json one-off-task-dpl\n\n\nYou will see the following output:\n\n\nDEPLOY ID DEPLOY NAME VERSION\none-off-task-dpl.1 one-off-task-dpl 1\n\n\n\n\n\nPart 3: Create the task\n#\n\n\nAt this point, you can use the \ntask create\n command to run a copy of the task.\n\n\nTo run the task, use the following command:\n\n\nl0 task create demo-env echo-tsk one-off-task-dpl:latest --wait\n\n\nYou will see the following output:\n\n\nTASK ID TASK NAME ENVIRONMENT DEPLOY SCALE\none-off851c9 echo-tsk demo-env one-off-task-dpl:1 0/1 (1)\n\n\n\n\n\nThe \nSCALE\n column shows the running, desired and pending counts. A value of \n0/1 (1)\n indicates that running = 0, desired = 1 and (1) for 1 pending task that is about to transition to running state. After your task has finished running, note that the desired count will remain 1 and pending value will no longer be shown, so the value will be \n0/1\n for a finished task.\n\n\nPart 4: Check the status of the task\n#\n\n\nTo view the logs for this task, and evaluate its progress, you can use the \ntask logs\n command:\n\n\nl0 task logs one-off-task-tsk\n \n\n\nYou will see the following output:\n\n\nalpine\n\n\n------\n\nTask finished!\n\n\n\n\n\nYou can also use the following command for more information in the task.\n\n\nl0 -o json task get echo-tsk\n\n\nOutputs:\n\n\n[\n {\n \ncopies\n: [\n {\n \ndetails\n: [],\n \nreason\n: \nWaiting for cluster capacity to run\n,\n \ntask_copy_id\n: \n\n }\n ],\n \ndeploy_id\n: \none-off-task-dpl.2\n,\n \ndeploy_name\n: \none-off-task-dpl\n,\n \ndeploy_version\n: \n2\n,\n \ndesired_count\n: 1,\n \nenvironment_id\n: \ndemoenv669e4\n,\n \nenvironment_name\n: \ndemo-env\n,\n \npending_count\n: 1,\n \nrunning_count\n: 0,\n \ntask_id\n: \nechotsk1facd\n,\n \ntask_name\n: \necho-tsk\n\n }\n]\n\n\n\n\n\nAfter the task has finished, running \nl0 -o json task get echo-tsk\n will show a pending_count of 0.\n\n\nOutputs:\n\n\n...\n\ncopies\n: [\n {\n \ndetails\n: [\n {\n \ncontainer_name\n: \nalpine\n,\n \nexit_code\n: 0,\n \nlast_status\n: \nSTOPPED\n,\n \nreason\n: \n\n }\n ],\n \nreason\n: \nEssential container in task exited\n,\n \ntask_copy_id\n: \narn:aws:ecs:us-west-2:856306994068:task/0e723c3e-9cd1-4914-8393-b59abd40eb89\n\n }\n],\n...\n\npending_count\n: 0,\n\nrunning_count\n: 0,\n...", + "title": "One-off Task" + }, + { + "location": "/guides/one_off_task/#deployment-guide-guestbook-one-off-task", + "text": "In this example, you will learn how to use layer0 to run a one-off task. A task is used to run a single instance of your Task Definition and is typically a short running job that will be stopped once finished.", + "title": "Deployment guide: Guestbook one-off task" + }, + { + "location": "/guides/one_off_task/#before-you-start", + "text": "In order to complete the procedures in this section, you must install and configure Layer0 v0.8.4 or later. If you have not already configured Layer0, see the installation guide . If you are running an older version of Layer0, see the upgrade instructions .", + "title": "Before you start" + }, + { + "location": "/guides/one_off_task/#part-1-prepare-the-task-definition", + "text": "Download the Guestbook One-off Task Definition and save it to your computer as Dockerrun.aws.json .", + "title": "Part 1: Prepare the task definition" + }, + { + "location": "/guides/one_off_task/#part-2-create-a-deploy", + "text": "Next, you will create a new deploy for the task using the deploy create command. 
At the command prompt, run the following command: l0 deploy create Dockerrun.aws.json one-off-task-dpl You will see the following output: DEPLOY ID DEPLOY NAME VERSION\none-off-task-dpl.1 one-off-task-dpl 1", + "title": "Part 2: Create a deploy" + }, + { + "location": "/guides/one_off_task/#part-3-create-the-task", + "text": "At this point, you can use the task create command to run a copy of the task. To run the task, use the following command: l0 task create demo-env echo-tsk one-off-task-dpl:latest --wait You will see the following output: TASK ID TASK NAME ENVIRONMENT DEPLOY SCALE\none-off851c9 echo-tsk demo-env one-off-task-dpl:1 0/1 (1) The SCALE column shows the running, desired and pending counts. A value of 0/1 (1) indicates that running = 0, desired = 1 and (1) for 1 pending task that is about to transition to running state. After your task has finished running, note that the desired count will remain 1 and pending value will no longer be shown, so the value will be 0/1 for a finished task.", + "title": "Part 3: Create the task" + }, + { + "location": "/guides/one_off_task/#part-4-check-the-status-of-the-task", + "text": "To view the logs for this task, and evaluate its progress, you can use the task logs command: l0 task logs one-off-task-tsk You will see the following output: alpine ------ \nTask finished! You can also use the following command for more information in the task. l0 -o json task get echo-tsk Outputs: [\n {\n copies : [\n {\n details : [],\n reason : Waiting for cluster capacity to run ,\n task_copy_id : \n }\n ],\n deploy_id : one-off-task-dpl.2 ,\n deploy_name : one-off-task-dpl ,\n deploy_version : 2 ,\n desired_count : 1,\n environment_id : demoenv669e4 ,\n environment_name : demo-env ,\n pending_count : 1,\n running_count : 0,\n task_id : echotsk1facd ,\n task_name : echo-tsk \n }\n] After the task has finished, running l0 -o json task get echo-tsk will show a pending_count of 0. Outputs: ... copies : [\n {\n details : [\n {\n container_name : alpine ,\n exit_code : 0,\n last_status : STOPPED ,\n reason : \n }\n ],\n reason : Essential container in task exited ,\n task_copy_id : arn:aws:ecs:us-west-2:856306994068:task/0e723c3e-9cd1-4914-8393-b59abd40eb89 \n }\n],\n... pending_count : 0, running_count : 0,\n...", + "title": "Part 4: Check the status of the task" + }, + { + "location": "/reference/cli/", + "text": "Layer0 CLI Reference\n#\n\n\nGlobal options\n#\n\n\nThe \nl0\n application is designed to be used with one of several commands: \nadmin\n, \ndeploy\n, \nenvironment\n, \njob\n, \nloadbalancer\n, \nservice\n, and \ntask\n. These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using \nl0\n.\n\n\nUsage\n#\n\n\nl0\n \n[\nglobal\n \noptions\n]\n \ncommand\n \nsubcommand\n \n[\nsubcommand\n \noptions\n]\n \nparams\n\n\n\n\n\n\nGlobal options\n#\n\n\n\n\n-o [text|json], --output [text|json]\n - Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the \n--output json\n option, you can force \nl0\n to output JSON-formatted text.\n\n\n-t value, --timeout value\n - Specify the timeout for running \nl0\n commands. Values can be in h, m, s, or ms.\n\n\n-d, --debug\n - Print debug statements\n\n\n-v, --version\n - Display the version number of the \nl0\n application.\n\n\n\n\n\n\nAdmin\n#\n\n\nThe \nadmin\n command is used to manage the Layer0 API server. 
This command is used with the following subcommands: \ndebug\n, \nsql\n, and \nversion\n.\n\n\nadmin debug\n#\n\n\nUse the \ndebug\n subcommand to view the running version of your Layer0 API server and CLI.\n\n\nUsage\n#\n\n\nl0 admin debug\n\n\n\n\n\nadmin sql\n#\n\n\nUse the \nsql\n subcommand to initialize the Layer0 API database.\n\n\nUsage\n#\n\n\nl0 admin sql\n\n\n\n\n\nAdditional information\n#\n\n\nThe \nsql\n subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.\n\n\nadmin version\n#\n\n\nUse the \nversion\n subcommand to display the current version of the Layer0 API.\n\n\nUsage\n#\n\n\nl0 admin version \n\n\n\n\n\n\n\nDeploy\n#\n\n\nDeploys are ECS Task Definitions. They are configuration files that detail how to deploy your application.\nThe \ndeploy\n command is used to manage Layer0 deploys. This command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, and \nlist\n.\n\n\ndeploy create\n#\n\n\nUse the \ncreate\n subcommand to upload a Docker task definition into Layer0. \n\n\nUsage\n#\n\n\nl0 deploy create taskDefPath deployName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\ntaskDefPath\n - The path to the Docker task definition that you want to upload.\n\n\ndeployName\n - A name for the deploy.\n\n\n\n\nAdditional information\n#\n\n\nIf \ndeployName\n exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version.\n\n\nIf you use Visual Studio to modify or create your Dockerrun file, you may see an \"Invalid Dockerrun.aws.json\" error. This error is caused by the default encoding used by Visual Studio. 
See the \n\"Common issues\" page\n for steps to resolve this issue.\n\n\nDeploys created through Layer0 are rendered with a \nlogConfiguration\n section for each container.\nIf a \nlogConfiguration\n section already exists, no changes are made to the section.\nThe additional section enables logs from each container to be sent to the the Layer0 log group.\nThis is where logs are looked up during \nl0 \nentity\n logs\n commands.\nThe added \nlogConfiguration\n section uses the following template:\n\n\nlogConfiguration\n: {\n \nlogDriver\n: \nawslogs\n,\n \noptions\n: {\n \nawslogs-group\n: \nl0-\nprefix\n,\n \nawslogs-region\n: \nregion\n,\n \nawslogs-stream-prefix\n: \nl0\n\n }\n }\n}\n\n\n\n\n\ndeploy delete\n#\n\n\nUse the \ndelete\n subcommand to delete a version of a Layer0 deploy.\n\n\nUsage\n#\n\n\nl0 deploy delete deployName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\ndeployName\n - The name of the Layer0 deploy you want to delete.\n\n\n\n\ndeploy get\n#\n\n\nUse the \nget\n subcommand to view information about an existing Layer0 deploy.\n\n\nUsage\n#\n\n\nl0 deploy get deployName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\ndeployName\n - The name of the Layer0 deploy for which you want to view additional information.\n\n\n\n\nAdditional information\n#\n\n\nThe \nget\n subcommand supports wildcard matching: \nl0 deploy get dep*\n would return all deploys beginning with \ndep\n.\n\n\ndeploy list\n#\n\n\nUse the \nlist\n subcommand to view a list of deploys in your instance of Layer0.\n\n\nUsage\n#\n\n\nl0 deploy list\n\n\n\n\n\n\n\nEnvironment\n#\n\n\nLayer0 environments allow you to isolate services and load balancers for specific applications.\nThe \nenvironment\n command is used to manage Layer0 environments. This command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, \nlist\n, and \nsetmincount\n.\n\n\nenvironment create\n#\n\n\nUse the \ncreate\n subcommand to create a new Layer0 environment.\n\n\nUsage\n#\n\n\nl0 environment create [--size size | --min-count mincount | \n --user-data path | --os os | --ami amiID] environmentName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nenvironmentName\n - A name for the environment.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--size size\n - The instance size of the EC2 instances to create in your environment (default: m3.medium).\n\n\n--min-count mincount\n - The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0).\n\n\n--user-data path\n - The user data template file to use for the environment's autoscaling group.\n\n\n--os os\n - The operating system used in the environment. Options are \"linux\" or \"windows\" (default: linux). More information on windows environments is documented below.\n\n\nami amiID\n - A custom EC2 AMI ID to use in the environment. If not specified, Layer0 will use its default AMI ID for the specified operating system.\n\n\n\n\nThe user data template can be used to add custom configuration to your Layer0 environment. 
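Putting the options above together, creating environments might look like the following sketch. The environment names, instance size, AMI ID, and user-data path are placeholders rather than values from this documentation:

l0 environment create --size t2.small --min-count 1 demo-env
l0 environment create --os windows win-env                     # Windows environments carry the caveats noted below
l0 environment create --user-data ./user_data.tmpl --ami ami-0123456789abcdef0 custom-env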
They are usually scripts that are executed at instance launch time to ensure an EC2 instance is in the correct state after the provisioning process finishes.\nLayer0 uses \nGo Templates\n to render user data.\nCurrently, two variables are passed into the template: \nECSEnvironmentID\n and \nS3Bucket\n.\n\n\n\n\nDanger\n\n\nPlease review the \nECS Tutorial\n\nto better understand how to write a user data template, and use at your own risk!\n\n\n\n\nLinux Environments\n: The default Layer0 user data template is:\n\n\n#!/bin/bash\n\n\necho\n \nECS_CLUSTER\n={{\n .ECSEnvironmentID \n}}\n \n /etc/ecs/ecs.config\n\necho\n \nECS_ENGINE_AUTH_TYPE\n=\ndockercfg \n /etc/ecs/ecs.config\nyum install -y aws-cli awslogs jq\naws s3 cp s3://\n{{\n .S3Bucket \n}}\n/bootstrap/dockercfg dockercfg\n\ncfg\n=\n$(\ncat dockercfg\n)\n\n\necho\n \nECS_ENGINE_AUTH_DATA\n=\n$cfg\n \n /etc/ecs/ecs.config\ndocker pull amazon/amazon-ecs-agent:latest\nstart ecs\n\n\n\n\n\nWindows Environments\n: The default Layer0 user data template is:\n\n\npowershell\n\n\n# Set agent env variables for the Machine context (durable)\n\n\n$clusterName\n \n=\n \n{{ .ECSEnvironmentID }}\n\n\nWrite-Host\n \nCluster\n \nname\n \nset\n \nas\n:\n \n$clusterName\n \n-foreground\n \ngreen\n\n\n\n[Environment]\n::\nSetEnvironmentVariable\n(\nECS_CLUSTER\n,\n \n$clusterName\n,\n \nMachine\n)\n\n\n[Environment]\n::\nSetEnvironmentVariable\n(\nECS_ENABLE_TASK_IAM_ROLE\n,\n \nfalse\n,\n \nMachine\n)\n\n\n$agentVersion\n \n=\n \nv1.5.2\n\n\n$agentZipUri\n \n=\n \nhttps://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip\n\n\n$agentZipMD5Uri\n \n=\n \n$agentZipUri.md5\n\n\n\n# Configure docker auth\n\n\nRead-S3Object\n \n-BucketName\n \n{{\n \n.\nS3Bucket\n \n}}\n \n-Key\n \nbootstrap\n/\ndockercfg\n \n-File\n \ndockercfg\n.\njson\n\n\n$dockercfgContent\n \n=\n \n[IO.File]\n::\nReadAllText\n(\ndockercfg.json\n)\n\n\n[Environment]\n::\nSetEnvironmentVariable\n(\nECS_ENGINE_AUTH_DATA\n,\n \n$dockercfgContent\n,\n \nMachine\n)\n\n\n[Environment]\n::\nSetEnvironmentVariable\n(\nECS_ENGINE_AUTH_TYPE\n,\n \ndockercfg\n,\n \nMachine\n)\n\n\n\n### --- Nothing user configurable after this point ---\n\n\n$ecsExeDir\n \n=\n \n$env:ProgramFiles\\Amazon\\ECS\n\n\n$zipFile\n \n=\n \n$env:TEMP\\ecs-agent.zip\n\n\n$md5File\n \n=\n \n$env:TEMP\\ecs-agent.zip.md5\n\n\n\n### Get the files from S3\n\n\nInvoke-RestMethod\n \n-OutFile\n \n$zipFile\n \n-Uri\n \n$agentZipUri\n\n\nInvoke-RestMethod\n \n-OutFile\n \n$md5File\n \n-Uri\n \n$agentZipMD5Uri\n\n\n\n## MD5 Checksum\n\n\n$expectedMD5\n \n=\n \n(\nGet-Content\n \n$md5File\n)\n\n\n$md5\n \n=\n \nNew-Object\n \n-TypeName\n \nSystem\n.\nSecurity\n.\nCryptography\n.\nMD5CryptoServiceProvider\n\n\n$actualMD5\n \n=\n \n[System.BitConverter]\n::\nToString\n(\n$md5\n.\nComputeHash\n(\n[System.IO.File]\n::\nReadAllBytes\n(\n$zipFile\n))).\nreplace\n(\n-\n,\n \n)\n\n\nif\n(\n$expectedMD5\n \n-ne\n \n$actualMD5\n)\n \n{\n\n \necho\n \nDownload doesn\nt match hash.\n\n \necho\n \nExpected: $expectedMD5 - Got: $actualMD5\n\n \nexit\n \n1\n\n\n}\n\n\n\n## Put the executables in the executable directory.\n\n\nExpand-Archive\n \n-Path\n \n$zipFile\n \n-DestinationPath\n \n$ecsExeDir\n \n-Force\n\n\n\n## Start the agent script in the background.\n\n\n$jobname\n \n=\n \nECS-Agent-Init\n\n\n$script\n \n=\n \ncd \n$ecsExeDir\n; .\\amazon-ecs-agent.ps1\n\n\n$repeat\n \n=\n \n(\nNew-TimeSpan\n \n-Minutes\n \n1\n)\n\n\n$jobpath\n \n=\n \n$env:LOCALAPPDATA\n \n+\n 
\n\\Microsoft\\Windows\\PowerShell\\ScheduledJobs\\$jobname\\ScheduledJobDefinition.xml\n\n\n\nif\n($(\nTest-Path\n \n-Path\n \n$jobpath\n))\n \n{\n\n \necho\n \nJob definition already present\n\n \nexit\n \n0\n\n\n}\n\n\n\n$scriptblock\n \n=\n \n[scriptblock]\n::\nCreate\n(\n$script\n)\n\n\n$trigger\n \n=\n \nNew-JobTrigger\n \n-At\n \n(\nGet-Date\n).\nDate\n \n-RepeatIndefinitely\n \n-RepetitionInterval\n \n$repeat\n \n-Once\n\n\n$options\n \n=\n \nNew-ScheduledJobOption\n \n-RunElevated\n \n-ContinueIfGoingOnBattery\n \n-StartIfOnBattery\n\n\nRegister-ScheduledJob\n \n-Name\n \n$jobname\n \n-ScriptBlock\n \n$scriptblock\n \n-Trigger\n \n$trigger\n \n-ScheduledJobOption\n \n$options\n \n-RunNow\n\n\nAdd-JobTrigger\n \n-Name\n \n$jobname\n \n-Trigger\n \n(\nNew-JobTrigger\n \n-AtStartup\n \n-RandomDelay\n \n00\n:\n1\n:\n00\n)\n\n\n/\npowershell\n\n\npersist\ntrue\n/\npersist\n\n\n\n\n\n\n\n\nWindows Environments\nWindows containers are still in beta. \n\n\n\n\n\n\nYou can view the documented caveats with ECS \nhere\n.\nWhen creating Windows environments in Layer0, the root volume sizes for instances are 200GiB to accommodate the large size of the containers.\n\nIt can take as long as 45 minutes for a new windows container to come online. \n\n\nenvironment delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing Layer0 environment.\n\n\nUsage\n#\n\n\nl0 environment delete [--wait] environmentName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nenvironmentName\n - The name of the Layer0 environment that you want to delete.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--wait\n - Wait until the deletion is complete before exiting.\n\n\n\n\nAdditional information\n#\n\n\nThis operation performs several tasks asynchronously. When run without the \n--wait\n option, this operation will most likely exit before all of these tasks are complete; when run with the \n--wait\n option, this operation will only exit once these tasks have completed.\n\n\nenvironment get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 environment.\n\n\nUsage\n#\n\n\nl0 environment get environmentName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nenvironmentName\n - The name of the Layer0 environment for which you want to view additional information.\n\n\n\n\nAdditional information\n#\n\n\nThe \nget\n subcommand supports wildcard matching: \nl0 environment get test*\n would return all environments beginning with \ntest\n.\n\n\nenvironment list\n#\n\n\nUse the \nlist\n subcommand to display a list of environments in your instance of Layer0.\n\n\nUsage\n#\n\n\nl0 environment list\n\n\n\n\n\nenvironment setmincount\n#\n\n\nUse the \nsetmincount\n subcommand to set the minimum number of EC2 instances allowed the environment's autoscaling group.\n\n\nUsage\n#\n\n\nl0 environment setmincount environmentName count\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nenvironmentName\n - The name of the Layer0 environment that you want to adjust.\n\n\ncount\n - The minimum number of instances allowed in the environment's autoscaling group.\n\n\n\n\nenvironment link\n#\n\n\nUse the \nlink\n subcommand to link two environments together. \nWhen environments are linked, services inside the environments are allowed to communicate with each other as if they were in the same environment. \nThis link is bidirectional. 
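A few hedged examples of the environment-management subcommands above; the environment names are placeholders (tools-env is illustrative, not the reserved api environment used for jobs):

l0 environment setmincount demo-env 2       # keep at least two EC2 instances in the autoscaling group
l0 environment link demo-env tools-env      # services in either environment can now reach each other
l0 environment unlink demo-env tools-env    # remove the link again (unlink is described below)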
\nThis command is idempotent; it will succeed even if the two specified environments are already linked.\n\n\nUsage\n#\n\n\nl0 environment link sourceEnvironmentName destEnvironmentName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nsourceEnvironmentName\n - The name of the source environment to link.\n\n\ndestEnvironmentName\n - The name of the destination environment to link.\n\n\n\n\nenvironment unlink\n#\n\n\nUse the \nunlink\n subcommand to remove the link between two environments.\nThis command is idempotent; it will succeed even if the link does not exist.\n\n\nUsage\n#\n\n\nl0 environment unlink sourceEnvironmentName destEnvironmentName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nsourceEnvironmentName\n - The name of the source environment to unlink.\n\n\ndestEnvironmentName\n - The name of the destination environment to unlink.\n\n\n\n\n\n\nJob\n#\n\n\nA Job is a long-running unit of work performed on behalf of the Layer0 API.\nJobs are executed as Layer0 tasks that run in the \napi\n environment.\nThe \njob\n command is used with the following subcommands: \nlogs\n, \ndelete\n, \nget\n, and \nlist\n.\n\n\njob logs\n#\n\n\nUse the \nlogs\n subcommand to display the logs from a Layer0 job that is currently running.\n\n\nUsage\n#\n\n\nl0 job logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] jobName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\njobName\n - The name of the Layer0 job for which you want to view logs.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--start MM/DD HH:MM\n - The start of the time range to fetch logs.\n\n\n--end MM/DD HH:MM\n - The end of the time range to fetch logs.\n\n\n--tail=N\n - Display only the last \nN\n lines of the log.\n\n\n\n\njob delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing job.\n\n\nUsage\n#\n\n\nl0 job delete jobName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\njobName\n - The name of the job that you want to delete.\n\n\n\n\njob get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 job.\n\n\nUsage\n#\n\n\nl0 job get jobName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\njobName\n - The name of an existing Layer0 job to display.\n\n\n\n\nAdditional information\n#\n\n\nThe \nget\n subcommand supports wildcard matching: \nl0 job get 2a55*\n would return all jobs beginning with \n2a55\n.\n\n\njob list\n#\n\n\nUse the \nlist\n subcommand to display information about all of the existing jobs in an instance of Layer0.\n\n\nUsage\n#\n\n\nl0 job list\n\n\n\n\n\n\n\nLoad Balancer\n#\n\n\nA load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 \nservices\n. The \nloadbalancer\n command is used with the following subcommands: \ncreate\n, \ndelete\n, \naddport\n, \ndropport\n, \nget\n, \nlist\n, and \nhealthcheck\n.\n\n\nloadbalancer create\n#\n\n\nUse the \ncreate\n subcommand to create a new load balancer.\n\n\nUsage\n#\n\n\nl0 loadbalancer create [--port port ... 
| --certificate certifiateName | \n --private | --healthcheck-target target | --healthcheck-interval interval | \n --healthcheck-timeout timeout | --healthcheck-healthy-threshold healthyThreshold | \n --healthcheck-unhealthy-threshold unhealthyThreshold] environmentName loadBalancerName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nenvironmentName\n - The name of the existing Layer0 environment in which you want to create the load balancer.\n\n\nloadBalancerName\n - A name for the load balancer you are creating.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--port port ...\n - The port configuration for the listener of the load balancer. Valid pattern is \nhostPort:containerPort/protocol\n. Multiple ports can be specified using \n--port port1 --port port2 ...\n (default: \n80/80:TCP\n).\n\n\nhostPort\n - The port that the load balancer will listen for traffic on.\n\n\ncontainerPort\n - The port that the load balancer will forward traffic to.\n\n\nprotocol\n - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).\n\n\n\n\n\n\n--certificate certificateName\n - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.\n\n\n--private\n - When you use this option, the load balancer will only be accessible from within the Layer0 environment.\n\n\n--healthcheck-target target\n - The target of the check. Valid pattern is \nPROTOCOL:PORT/PATH\n (default: \n\"TCP:80\"\n). \n\n\nIf \nPROTOCOL\n is \nHTTP\n or \nHTTPS\n, both \nPORT\n and \nPATH\n are required. Example: \nHTTP:80/admin/healthcheck\n. \n\n\nIf \nPROTOCOL\n is \nTCP\n or \nSSL\n, \nPORT\n is required and \nPATH\n is not used. Example: \nTCP:80\n\n\n\n\n\n\n--healthcheck-interval interval\n - The interval between checks (default: \n30\n).\n\n\n--healthcheck-timeout timeout\n - The length of time before the check times out (default: \n5\n).\n\n\n--healthcheck-healthy-threshold healthyThreshold\n - The number of checks before the instance is declared healthy (default: \n2\n).\n\n\n--healthcheck-unhealthy-threshold unhealthyThreshold\n - The number of checks before the instance is declared unhealthy (default: \n2\n).\n\n\n\n\n\n\nPorts and Health Checks\n\n\nWhen both the \n--port\n and the \n--healthcheck-target\n options are omitted, Layer0 configures the load balancer with some default values: \n80:80/TCP\n for ports and \nTCP:80\n for healthcheck target.\nThese default values together create a load balancer configured with a simple but functioning health check, opening up a set of ports that allows traffic to the target of the healthcheck.\n(\n--healthcheck-target TCP:80\n tells the load balancer to ping its services at port 80 to determine their status, and \n--port 80:80/TCP\n configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services)\n\n\nWhen creating a load balancer with non-default configurations for either \n--port\n or \n--healthcheck-target\n, make sure that a valid \n--port\n and \n--healthcheck-target\n pairing is also created.\n\n\n\n\nloadbalancer delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing load balancer.\n\n\nUsage\n#\n\n\nl0 loadbalancer delete [--wait] loadBalancerName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nloadBalancerName\n - The name of the load balancer that you want to delete.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--wait\n - Wait until the deletion is complete before exiting.\n\n\n\n\nAdditional information\n#\n\n\nIn order to delete a load 
balancer that is already attached to a service, you must first delete the service that uses the load balancer.\n\n\nThis operation performs several tasks asynchronously. When run without the \n--wait\n option, this operation will most likely exit before all of these tasks are complete; when run with the \n--wait\n option, this operation will only exit once these tasks have completed\n.\n\n\nloadbalancer addport\n#\n\n\nUse the \naddport\n subcommand to add a new port configuration to an existing Layer0 load balancer.\n\n\nUsage\n#\n\n\nl0 loadbalancer addport [--certificate certificateName] loadBalancerName port\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nloadBalancerName\n - The name of an existing Layer0 load balancer in which you want to add the port configuration.\n\n\nport\n - The port configuration for the listener of the load balancer. Valid pattern is \nhostPort:containerPort/protocol\n.\n\n\nhostPort\n - The port that the load balancer will listen for traffic on.\n\n\ncontainerPort\n - The port that the load balancer will forward traffic to.\n\n\nprotocol\n - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).\n\n\n\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--certificate certificateName\n - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.\n\n\n\n\nAdditional information\n#\n\n\nThe port configuration you specify must not already be in use by the load balancer you specify.\n\n\nloadbalancer dropport\n#\n\n\nUse the \ndropport\n subcommand to remove a port configuration from an existing Layer0 load balancer.\n\n\nUsage\n#\n\n\nl0 loadbalancer dropport loadBalancerName hostPort\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nloadBalancerName\n- The name of an existing Layer0 load balancer from which you want to remove the port configuration.\n\n\nhostPort\n- The host port to remove from the load balancer.\n\n\n\n\nloadbalancer get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 load balancer.\n\n\nUsage\n#\n\n\nl0 loadbalancer get [environmentName:]loadBalancerName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]loadBalancerName\n - The name of an existing Layer0 load balancer. You can optionally provide the Layer0 environment (\nenvironmentName\n) associated with the Load Balancer\n\n\n\n\nAdditional information\n#\n\n\nThe \nget\n subcommand supports wildcard matching: \nl0 loadbalancer get entrypoint*\n would return all jobs beginning with \nentrypoint\n.\n\n\nloadbalancer list\n#\n\n\nUse the \nlist\n subcommand to display information about all of the existing load balancers in an instance of Layer0.\n\n\nUsage\n#\n\n\nl0 loadbalancer list\n\n\n\n\n\nloadbalancer healthcheck\n#\n\n\nUse the \nhealthcheck\n subcommand to display information about or update the configuration of a load balancer's health check.\n\n\nUsage\n#\n\n\nl0 loadbalancer healthcheck [--set-target target | --set-interval interval | \n --set-timeout timeout | --set-healthy-threshold healthyThreshold | \n --set-unhealthy-threshold unhealthyThreshold] loadbalancerName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nloadBalancerName\n - The name of the existing Layer0 load balancer you are modifying.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--set-target target\n - The target of the check. Valid pattern is \nPROTOCOL:PORT/PATH\n.\n\n\nIf \nPROTOCOL\n is \nHTTP\n or \nHTTPS\n, both \nPORT\n and \nPATH\n are required. 
Example: \nHTTP:80/admin/healthcheck\n.\n\n\nIf \nPROTOCOL\n is \nTCP\n or \nSSL\n, \nPORT\n is required and \nPATH\n is not used. Example: \nTCP:80\n\n\n\n\n\n\n--set-interval interval\n - The interval between health checks.\n\n\n--set-timeout timeout\n - The length of time in seconds before the health check times out.\n\n\n--set-healthy-threshold healthyThreshold\n - The number of checks before the instance is declared healthy.\n\n\n--set-unhealthy-threshold unhealthyThreshold\n - The number of checks before the instance is declared unhealthy.\n\n\n\n\nAdditional information\n#\n\n\nCalling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.\n\n\n\n\nService\n#\n\n\nA service is a component of a Layer0 environment. The purpose of a service is to execute a Docker image specified in a \ndeploy\n. In order to create a service, you must first create an \nenvironment\n and a \ndeploy\n; in most cases, you should also create a \nload balancer\n before creating the service.\n\n\nThe \nservice\n command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, \nupdate\n, \nlist\n, \nlogs\n, and \nscale\n.\n\n\nservice create\n#\n\n\nUse the \ncreate\n subcommand to create a Layer0 service.\n\n\nUsage\n#\n\n\nl0 service create [--loadbalancer [environmentName:]loadBalancerName | \n --no-logs] environmentName serviceName deployName[:deployVersion]\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nserviceName\n - A name for the service that you are creating.\n\n\nenvironmentName\n - The name of an existing Layer0 environment.\n\n\ndeployName[:deployVersion]\n - The name of a Layer0 deploy that exists in the environment \nenvironmentName\n. You can optionally specify the version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--loadbalancer [environmentName:]loadBalancerName\n - Place the new service behind an existing load balancer \nloadBalancerName\n. You can optionally specify the Layer0 environment (\nenvironmentName\n) where the load balancer exists.\n\n\n--no-logs\n - Disable cloudwatch logging for the service\n\n\n\n\nservice update\n#\n\n\nUse the \nupdate\n subcommand to apply an existing Layer0 Deploy to an existing Layer0 service.\n\n\nUsage\n#\n\n\nl0 service update [--no-logs] [environmentName:]serviceName deployName[:deployVersion]\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]serviceName\n - The name of an existing Layer0 service into which you want to apply the deploy. You can optionally specify the Layer0 environment (\nenvironmentName\n) of the service.\n\n\ndeployName[:deployVersion]\n - The name of the Layer0 deploy that you want to apply to the service. You can optionally specify a specific version of the deploy (\ndeployVersion\n). If you do not specify a version number, the latest version of the deploy will be applied.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--no-logs\n - Disable cloudwatch logging for the service\n\n\n\n\nAdditional information\n#\n\n\nIf your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. 
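For example, rolling a service forward to a newer version of the same deploy keeps the container name and ports intact, so it is a safe update; the service and deploy names below are placeholders:

l0 deploy create Dockerrun.aws.json guestbook-dpl        # uploading under the same name creates the next version
l0 service update demo-env:guestbook-svc guestbook-dpl:2
l0 service update --no-logs guestbook-svc guestbook-dpl  # no version given, so the latest version is applied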
In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead.\n\n\nservice delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing Layer0 service.\n\n\nUsage\n#\n\n\nl0 service delete [--wait] [environmentName:]serviceName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]serviceName\n - The name of the Layer0 service that you want to delete. You can optionally provide the Layer0 environment (\nenvironmentName\n) of the service.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--wait\n - Wait until the deletion is complete before exiting.\n\n\n\n\nAdditional information\n#\n\n\nThis operation performs several tasks asynchronously. When run without the \n--wait\n option, this operation will most likely exit before all of these tasks are complete; when run with the \n--wait\n option, this operation will only exit once these tasks have completed.\n\n\nservice get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 service.\n\n\nUsage\n#\n\n\nl0 service get [environmentName:]serviceName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]serviceName\n - The name of an existing Layer0 service. You can optionally provide the Layer0 environment (\nenvironmentName\n) of the service.\n\n\n\n\nservice list\n#\n\n\nUse the \nlist\n subcommand to list all of the existing services in your Layer0 instance.\n\n\nUsage\n#\n\n\nl0 service list\n\n\n\n\n\nservice logs\n#\n\n\nUse the \nlogs\n subcommand to display the logs from a Layer0 service that is currently running.\n\n\nUsage\n#\n\n\nl0 service logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] serviceName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nserviceName\n - The name of the Layer0 service for which you want to view logs.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--start MM/DD HH:MM\n - The start of the time range to fetch logs.\n\n\n--end MM/DD HH:MM\n - The end of the time range to fetch logs.\n\n\n--tail=N\n - Display only the last \nN\n lines of the log.\n\n\n\n\nservice scale\n#\n\n\nUse the \nscale\n subcommand to specify how many copies of an existing Layer0 service should run.\n\n\nUsage\n#\n\n\nl0 service scale [environmentName:]serviceName copies\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]serviceName\n - The name of the Layer0 service that you want to scale. You can optionally provide the Layer0 environment (\nenvironmentName\n) of the service.\n\n\ncopies\n - The number of copies of the specified service that should be run.\n\n\n\n\n\n\nTask\n#\n\n\nA Layer0 task is a component of an environment. A task executes the contents of a Docker image, as specified in a deploy. A task differs from a service in that a task does not restart after exiting. 
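This distinction drives the choice of command; as a hedged illustration (the task, service, and deploy names are placeholders):

l0 task create demo-env migrate-tsk migration-dpl --wait    # one-shot job: runs once and then stops
l0 service create demo-env guestbook-svc guestbook-dpl      # long-running process: kept running after it exits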
Additionally, ports are not exposed when using a task.\n\n\nThe \ntask\n command is used with the following subcommands: \ncreate\n, \ndelete\n, \nget\n, \nlist\n, and \nlogs\n.\n\n\ntask create\n#\n\n\nUse the \ncreate\n subcommand to create a Layer0 task.\n\n\nUsage\n#\n\n\nl0 task create [--copies copies | --no-logs] environmentName taskName deployName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\nenvironmentName\n - The name of the existing Layer0 environment in which you want to create the task.\n\n\ntaskName\n - A name for the task.\n\n\ndeployName\n - The name of an existing Layer0 deploy that the task should use.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--copies copies\n - The number of copies of the task to run (default: 1).\n\n\n--no-logs\n - Disable cloudwatch logging for the task.\n\n\n\n\ntask delete\n#\n\n\nUse the \ndelete\n subcommand to delete an existing Layer0 task.\n\n\nUsage\n#\n\n\nl0 task delete [environmentName:]taskName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]taskName\n - The name of the Layer0 task that you want to delete. You can optionally specify the name of the Layer0 environment that contains the task. This parameter is only required if multiple environments contain tasks with exactly the same name.\n\n\n\n\nAdditional information\n#\n\n\nUntil the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.\n\n\ntask get\n#\n\n\nUse the \nget\n subcommand to display information about an existing Layer0 task (\ntaskName\n).\n\n\nUsage\n#\n\n\nl0 task get [environmentName:]taskName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\n[environmentName:]taskName\n - The name of a Layer0 task for which you want to see information. You can optionally specify the name of the Layer0 Environment that contains the task.\n\n\n\n\nAdditional information\n#\n\n\nThe value of \ntaskName\n does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in \ntaskName\n, then information about all matching tasks will be returned.\n\n\ntask list\n#\n\n\nUse the \nlist\n subcommand to display a list of running tasks in your Layer0.\n\n\nUsage\n#\n\n\nl0 task list\n\n\n\n\n\ntask logs\n#\n\n\nUse the \nlogs\n subcommand to display logs for a running Layer0 task.\n\n\nUsage\n#\n\n\nl0 task logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] taskName\n\n\n\n\n\nRequired parameters\n#\n\n\n\n\ntaskName\n - The name of an existing Layer0 task.\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--start MM/DD HH:MM\n - The start of the time range to fetch logs.\n\n\n--end MM/DD HH:MM\n - The end of the time range to fetch logs.\n\n\n--tail=N\n - Display only the last \nN\n lines of the log.\n\n\n\n\nAdditional information\n#\n\n\nThe value of \ntaskName\n does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in \ntaskName\n, then information about all matching tasks will be returned.", + "title": "Layer0 CLI" + }, + { + "location": "/reference/cli/#layer0-cli-reference", + "text": "", + "title": "Layer0 CLI Reference" + }, + { + "location": "/reference/cli/#global-options", + "text": "The l0 application is designed to be used with one of several commands: admin , deploy , environment , job , loadbalancer , service , and task .
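To round out the task subcommands documented above, a few hedged examples; the dates are placeholders, while demo-env, echo-tsk, and one-off-task-dpl follow the one-off task guide:

l0 task create --copies 2 demo-env echo-tsk one-off-task-dpl   # run two copies of the task
l0 task get echo*                                              # wildcard match on the task name
l0 task logs --start 01/12 09:00 --end 01/12 10:00 echo-tsk    # logs for a specific time window
l0 task delete demo-env:echo-tsk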
These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using l0 .", + "title": "Global options" + }, + { + "location": "/reference/cli/#usage", + "text": "l0 [ global options ] command subcommand [ subcommand options ] params", + "title": "Usage" + }, + { + "location": "/reference/cli/#global-options_1", + "text": "-o [text|json], --output [text|json] - Specify the format of Layer0 outputs. By default, Layer0 outputs unformatted text; by issuing the --output json option, you can force l0 to output JSON-formatted text. -t value, --timeout value - Specify the timeout for running l0 commands. Values can be in h, m, s, or ms. -d, --debug - Print debug statements -v, --version - Display the version number of the l0 application.", + "title": "Global options" + }, + { + "location": "/reference/cli/#admin", + "text": "The admin command is used to manage the Layer0 API server. This command is used with the following subcommands: debug , sql , and version .", + "title": "Admin" + }, + { + "location": "/reference/cli/#admin-debug", + "text": "Use the debug subcommand to view the running version of your Layer0 API server and CLI.", + "title": "admin debug" + }, + { + "location": "/reference/cli/#usage_1", + "text": "l0 admin debug", + "title": "Usage" + }, + { + "location": "/reference/cli/#admin-sql", + "text": "Use the sql subcommand to initialize the Layer0 API database.", + "title": "admin sql" + }, + { + "location": "/reference/cli/#usage_2", + "text": "l0 admin sql", + "title": "Usage" + }, + { + "location": "/reference/cli/#additional-information", + "text": "The sql subcommand is automatically executed during the Layer0 installation process; we recommend that you do not use this subcommand unless specifically directed to do so.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#admin-version", + "text": "Use the version subcommand to display the current version of the Layer0 API.", + "title": "admin version" + }, + { + "location": "/reference/cli/#usage_3", + "text": "l0 admin version", + "title": "Usage" + }, + { + "location": "/reference/cli/#deploy", + "text": "Deploys are ECS Task Definitions. They are configuration files that detail how to deploy your application.\nThe deploy command is used to manage Layer0 environments. This command is used with the following subcommands: create , delete , get , and list .", + "title": "Deploy" + }, + { + "location": "/reference/cli/#deploy-create", + "text": "Use the create subcommand to upload a Docker task definition into Layer0.", + "title": "deploy create" + }, + { + "location": "/reference/cli/#usage_4", + "text": "l0 deploy create taskDefPath deployName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters", + "text": "taskDefPath - The path to the Docker task definition that you want to upload. deployName - A name for the deploy.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_1", + "text": "If deployName exactly matches the name of an existing Layer0 deploy, then the version number of that deploy will increase by 1, and the task definition you specified will replace the task definition specified in the previous version. If you use Visual Studio to modify or create your Dockerrun file, you may see an \"Invalid Dockerrun.aws.json\" error. This error is caused by the default encoding used by Visual Studio. See the \"Common issues\" page for steps to resolve this issue. 
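As a hedged illustration of the versioning behaviour described above (the deploy name follows the one-off task guide; the service name is a placeholder):

l0 deploy create Dockerrun.aws.json one-off-task-dpl    # first upload becomes one-off-task-dpl.1
l0 deploy create Dockerrun.aws.json one-off-task-dpl    # same name again: the version increases to 2
l0 deploy get one-off*                                  # wildcard matching across deploys
l0 service create demo-env demo-svc one-off-task-dpl:1  # pin version 1 explicitly instead of :latest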
Deploys created through Layer0 are rendered with a logConfiguration section for each container.\nIf a logConfiguration section already exists, no changes are made to the section.\nThe additional section enables logs from each container to be sent to the the Layer0 log group.\nThis is where logs are looked up during l0 entity logs commands.\nThe added logConfiguration section uses the following template: logConfiguration : {\n logDriver : awslogs ,\n options : {\n awslogs-group : l0- prefix ,\n awslogs-region : region ,\n awslogs-stream-prefix : l0 \n }\n }\n}", + "title": "Additional information" + }, + { + "location": "/reference/cli/#deploy-delete", + "text": "Use the delete subcommand to delete a version of a Layer0 deploy.", + "title": "deploy delete" + }, + { + "location": "/reference/cli/#usage_5", + "text": "l0 deploy delete deployName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_1", + "text": "deployName - The name of the Layer0 deploy you want to delete.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#deploy-get", + "text": "Use the get subcommand to view information about an existing Layer0 deploy.", + "title": "deploy get" + }, + { + "location": "/reference/cli/#usage_6", + "text": "l0 deploy get deployName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_2", + "text": "deployName - The name of the Layer0 deploy for which you want to view additional information.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_2", + "text": "The get subcommand supports wildcard matching: l0 deploy get dep* would return all deploys beginning with dep .", + "title": "Additional information" + }, + { + "location": "/reference/cli/#deploy-list", + "text": "Use the list subcommand to view a list of deploys in your instance of Layer0.", + "title": "deploy list" + }, + { + "location": "/reference/cli/#usage_7", + "text": "l0 deploy list", + "title": "Usage" + }, + { + "location": "/reference/cli/#environment", + "text": "Layer0 environments allow you to isolate services and load balancers for specific applications.\nThe environment command is used to manage Layer0 environments. This command is used with the following subcommands: create , delete , get , list , and setmincount .", + "title": "Environment" + }, + { + "location": "/reference/cli/#environment-create", + "text": "Use the create subcommand to create a new Layer0 environment.", + "title": "environment create" + }, + { + "location": "/reference/cli/#usage_8", + "text": "l0 environment create [--size size | --min-count mincount | \n --user-data path | --os os | --ami amiID] environmentName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_3", + "text": "environmentName - A name for the environment.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments", + "text": "--size size - The instance size of the EC2 instances to create in your environment (default: m3.medium). --min-count mincount - The minimum number of EC2 instances allowed in the environment's autoscaling group (default: 0). --user-data path - The user data template file to use for the environment's autoscaling group. --os os - The operating system used in the environment. Options are \"linux\" or \"windows\" (default: linux). More information on windows environments is documented below. ami amiID - A custom EC2 AMI ID to use in the environment. 
If not specified, Layer0 will use its default AMI ID for the specified operating system. The user data template can be used to add custom configuration to your Layer0 environment. They are usually scripts that are executed at instance launch time to ensure an EC2 instance is in the correct state after the provisioning process finishes.\nLayer0 uses Go Templates to render user data.\nCurrently, two variables are passed into the template: ECSEnvironmentID and S3Bucket . Danger Please review the ECS Tutorial \nto better understand how to write a user data template, and use at your own risk! Linux Environments : The default Layer0 user data template is: #!/bin/bash echo ECS_CLUSTER ={{ .ECSEnvironmentID }} /etc/ecs/ecs.config echo ECS_ENGINE_AUTH_TYPE = dockercfg /etc/ecs/ecs.config\nyum install -y aws-cli awslogs jq\naws s3 cp s3:// {{ .S3Bucket }} /bootstrap/dockercfg dockercfg cfg = $( cat dockercfg ) echo ECS_ENGINE_AUTH_DATA = $cfg /etc/ecs/ecs.config\ndocker pull amazon/amazon-ecs-agent:latest\nstart ecs Windows Environments : The default Layer0 user data template is: powershell # Set agent env variables for the Machine context (durable) $clusterName = {{ .ECSEnvironmentID }} Write-Host Cluster name set as : $clusterName -foreground green [Environment] :: SetEnvironmentVariable ( ECS_CLUSTER , $clusterName , Machine ) [Environment] :: SetEnvironmentVariable ( ECS_ENABLE_TASK_IAM_ROLE , false , Machine ) $agentVersion = v1.5.2 $agentZipUri = https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-windows-$agentVersion.zip $agentZipMD5Uri = $agentZipUri.md5 # Configure docker auth Read-S3Object -BucketName {{ . S3Bucket }} -Key bootstrap / dockercfg -File dockercfg . json $dockercfgContent = [IO.File] :: ReadAllText ( dockercfg.json ) [Environment] :: SetEnvironmentVariable ( ECS_ENGINE_AUTH_DATA , $dockercfgContent , Machine ) [Environment] :: SetEnvironmentVariable ( ECS_ENGINE_AUTH_TYPE , dockercfg , Machine ) ### --- Nothing user configurable after this point --- $ecsExeDir = $env:ProgramFiles\\Amazon\\ECS $zipFile = $env:TEMP\\ecs-agent.zip $md5File = $env:TEMP\\ecs-agent.zip.md5 ### Get the files from S3 Invoke-RestMethod -OutFile $zipFile -Uri $agentZipUri Invoke-RestMethod -OutFile $md5File -Uri $agentZipMD5Uri ## MD5 Checksum $expectedMD5 = ( Get-Content $md5File ) $md5 = New-Object -TypeName System . Security . Cryptography . MD5CryptoServiceProvider $actualMD5 = [System.BitConverter] :: ToString ( $md5 . ComputeHash ( [System.IO.File] :: ReadAllBytes ( $zipFile ))). replace ( - , ) if ( $expectedMD5 -ne $actualMD5 ) { \n echo Download doesn t match hash. \n echo Expected: $expectedMD5 - Got: $actualMD5 \n exit 1 } ## Put the executables in the executable directory. Expand-Archive -Path $zipFile -DestinationPath $ecsExeDir -Force ## Start the agent script in the background. $jobname = ECS-Agent-Init $script = cd $ecsExeDir ; .\\amazon-ecs-agent.ps1 $repeat = ( New-TimeSpan -Minutes 1 ) $jobpath = $env:LOCALAPPDATA + \\Microsoft\\Windows\\PowerShell\\ScheduledJobs\\$jobname\\ScheduledJobDefinition.xml if ($( Test-Path -Path $jobpath )) { \n echo Job definition already present \n exit 0 } $scriptblock = [scriptblock] :: Create ( $script ) $trigger = New-JobTrigger -At ( Get-Date ). 
Date -RepeatIndefinitely -RepetitionInterval $repeat -Once $options = New-ScheduledJobOption -RunElevated -ContinueIfGoingOnBattery -StartIfOnBattery Register-ScheduledJob -Name $jobname -ScriptBlock $scriptblock -Trigger $trigger -ScheduledJobOption $options -RunNow Add-JobTrigger -Name $jobname -Trigger ( New-JobTrigger -AtStartup -RandomDelay 00 : 1 : 00 ) / powershell persist true / persist Windows Environments Windows containers are still in beta. You can view the documented caveats with ECS here .\nWhen creating Windows environments in Layer0, the root volume sizes for instances are 200GiB to accommodate the large size of the containers. \nIt can take as long as 45 minutes for a new windows container to come online.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#environment-delete", + "text": "Use the delete subcommand to delete an existing Layer0 environment.", + "title": "environment delete" + }, + { + "location": "/reference/cli/#usage_9", + "text": "l0 environment delete [--wait] environmentName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_4", + "text": "environmentName - The name of the Layer0 environment that you want to delete.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_1", + "text": "--wait - Wait until the deletion is complete before exiting.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_3", + "text": "This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#environment-get", + "text": "Use the get subcommand to display information about an existing Layer0 environment.", + "title": "environment get" + }, + { + "location": "/reference/cli/#usage_10", + "text": "l0 environment get environmentName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_5", + "text": "environmentName - The name of the Layer0 environment for which you want to view additional information.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_4", + "text": "The get subcommand supports wildcard matching: l0 environment get test* would return all environments beginning with test .", + "title": "Additional information" + }, + { + "location": "/reference/cli/#environment-list", + "text": "Use the list subcommand to display a list of environments in your instance of Layer0.", + "title": "environment list" + }, + { + "location": "/reference/cli/#usage_11", + "text": "l0 environment list", + "title": "Usage" + }, + { + "location": "/reference/cli/#environment-setmincount", + "text": "Use the setmincount subcommand to set the minimum number of EC2 instances allowed the environment's autoscaling group.", + "title": "environment setmincount" + }, + { + "location": "/reference/cli/#usage_12", + "text": "l0 environment setmincount environmentName count", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_6", + "text": "environmentName - The name of the Layer0 environment that you want to adjust. 
count - The minimum number of instances allowed in the environment's autoscaling group.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#environment-link", + "text": "Use the link subcommand to link two environments together. \nWhen environments are linked, services inside the environments are allowed to communicate with each other as if they were in the same environment. \nThis link is bidirectional. \nThis command is idempotent; it will succeed even if the two specified environments are already linked.", + "title": "environment link" + }, + { + "location": "/reference/cli/#usage_13", + "text": "l0 environment link sourceEnvironmentName destEnvironmentName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_7", + "text": "sourceEnvironmentName - The name of the source environment to link. destEnvironmentName - The name of the destination environment to link.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#environment-unlink", + "text": "Use the unlink subcommand to remove the link between two environments.\nThis command is idempotent; it will succeed even if the link does not exist.", + "title": "environment unlink" + }, + { + "location": "/reference/cli/#usage_14", + "text": "l0 environment unlink sourceEnvironmentName destEnvironmentName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_8", + "text": "sourceEnvironmentName - The name of the source environment to unlink. destEnvironmentName - The name of the destination environment to unlink.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#job", + "text": "A Job is a long-running unit of work performed on behalf of the Layer0 API.\nJobs are executed as Layer0 tasks that run in the api environment.\nThe job command is used with the following subcommands: logs , delete , get , and list .", + "title": "Job" + }, + { + "location": "/reference/cli/#job-logs", + "text": "Use the logs subcommand to display the logs from a Layer0 job that is currently running.", + "title": "job logs" + }, + { + "location": "/reference/cli/#usage_15", + "text": "l0 job logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] jobName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_9", + "text": "jobName - The name of the Layer0 job for which you want to view logs.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_2", + "text": "--start MM/DD HH:MM - The start of the time range to fetch logs. --end MM/DD HH:MM - The end of the time range to fetch logs. 
--tail=N - Display only the last N lines of the log.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#job-delete", + "text": "Use the delete subcommand to delete an existing job.", + "title": "job delete" + }, + { + "location": "/reference/cli/#usage_16", + "text": "l0 job delete jobName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_10", + "text": "jobName - The name of the job that you want to delete.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#job-get", + "text": "Use the get subcommand to display information about an existing Layer0 job.", + "title": "job get" + }, + { + "location": "/reference/cli/#usage_17", + "text": "l0 job get jobName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_11", + "text": "jobName - The name of an existing Layer0 job to display.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_5", + "text": "The get subcommand supports wildcard matching: l0 job get 2a55* would return all jobs beginning with 2a55 .", + "title": "Additional information" + }, + { + "location": "/reference/cli/#job-list", + "text": "Use the list subcommand to display information about all of the existing jobs in an instance of Layer0.", + "title": "job list" + }, + { + "location": "/reference/cli/#usage_18", + "text": "l0 job list", + "title": "Usage" + }, + { + "location": "/reference/cli/#load-balancer", + "text": "A load balancer is a component of a Layer0 environment. Load balancers listen for traffic on certain ports, and then forward that traffic to Layer0 services . The loadbalancer command is used with the following subcommands: create , delete , addport , dropport , get , list , and healthcheck .", + "title": "Load Balancer" + }, + { + "location": "/reference/cli/#loadbalancer-create", + "text": "Use the create subcommand to create a new load balancer.", + "title": "loadbalancer create" + }, + { + "location": "/reference/cli/#usage_19", + "text": "l0 loadbalancer create [--port port ... | --certificate certifiateName | \n --private | --healthcheck-target target | --healthcheck-interval interval | \n --healthcheck-timeout timeout | --healthcheck-healthy-threshold healthyThreshold | \n --healthcheck-unhealthy-threshold unhealthyThreshold] environmentName loadBalancerName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_12", + "text": "environmentName - The name of the existing Layer0 environment in which you want to create the load balancer. loadBalancerName - A name for the load balancer you are creating.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_3", + "text": "--port port ... - The port configuration for the listener of the load balancer. Valid pattern is hostPort:containerPort/protocol . Multiple ports can be specified using --port port1 --port port2 ... (default: 80/80:TCP ). hostPort - The port that the load balancer will listen for traffic on. containerPort - The port that the load balancer will forward traffic to. protocol - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS). --certificate certificateName - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration. --private - When you use this option, the load balancer will only be accessible from within the Layer0 environment. 
--healthcheck-target target - The target of the check. Valid pattern is PROTOCOL:PORT/PATH (default: \"TCP:80\" ). If PROTOCOL is HTTP or HTTPS , both PORT and PATH are required. Example: HTTP:80/admin/healthcheck . If PROTOCOL is TCP or SSL , PORT is required and PATH is not used. Example: TCP:80 --healthcheck-interval interval - The interval between checks (default: 30 ). --healthcheck-timeout timeout - The length of time before the check times out (default: 5 ). --healthcheck-healthy-threshold healthyThreshold - The number of checks before the instance is declared healthy (default: 2 ). --healthcheck-unhealthy-threshold unhealthyThreshold - The number of checks before the instance is declared unhealthy (default: 2 ). Ports and Health Checks When both the --port and the --healthcheck-target options are omitted, Layer0 configures the load balancer with some default values: 80:80/TCP for ports and TCP:80 for healthcheck target.\nThese default values together create a load balancer configured with a simple but functioning health check, opening up a set of ports that allows traffic to the target of the healthcheck.\n( --healthcheck-target TCP:80 tells the load balancer to ping its services at port 80 to determine their status, and --port 80:80/TCP configures a security group to allow traffic to pass between port 80 of the load balancer and port 80 of its services) When creating a load balancer with non-default configurations for either --port or --healthcheck-target , make sure that a valid --port and --healthcheck-target pairing is also created.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#loadbalancer-delete", + "text": "Use the delete subcommand to delete an existing load balancer.", + "title": "loadbalancer delete" + }, + { + "location": "/reference/cli/#usage_20", + "text": "l0 loadbalancer delete [--wait] loadBalancerName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_13", + "text": "loadBalancerName - The name of the load balancer that you want to delete.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_4", + "text": "--wait - Wait until the deletion is complete before exiting.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_6", + "text": "In order to delete a load balancer that is already attached to a service, you must first delete the service that uses the load balancer. This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed\n.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#loadbalancer-addport", + "text": "Use the addport subcommand to add a new port configuration to an existing Layer0 load balancer.", + "title": "loadbalancer addport" + }, + { + "location": "/reference/cli/#usage_21", + "text": "l0 loadbalancer addport [--certificate certificateName] loadBalancerName port", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_14", + "text": "loadBalancerName - The name of an existing Layer0 load balancer in which you want to add the port configuration. port - The port configuration for the listener of the load balancer. Valid pattern is hostPort:containerPort/protocol . hostPort - The port that the load balancer will listen for traffic on. 
containerPort - The port that the load balancer will forward traffic to. protocol - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_5", + "text": "--certificate certificateName - The name of an existing Layer0 certificate. You must include this option if you are using an HTTPS port configuration.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_7", + "text": "The port configuration you specify must not already be in use by the load balancer you specify.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#loadbalancer-dropport", + "text": "Use the dropport subcommand to remove a port configuration from an existing Layer0 load balancer.", + "title": "loadbalancer dropport" + }, + { + "location": "/reference/cli/#usage_22", + "text": "l0 loadbalancer dropport loadBalancerName hostPort", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_15", + "text": "loadBalancerName - The name of an existing Layer0 load balancer from which you want to remove the port configuration. hostPort - The host port to remove from the load balancer.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#loadbalancer-get", + "text": "Use the get subcommand to display information about an existing Layer0 load balancer.", + "title": "loadbalancer get" + }, + { + "location": "/reference/cli/#usage_23", + "text": "l0 loadbalancer get [environmentName:]loadBalancerName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_16", + "text": "[environmentName:]loadBalancerName - The name of an existing Layer0 load balancer. You can optionally provide the Layer0 environment ( environmentName ) associated with the Load Balancer", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_8", + "text": "The get subcommand supports wildcard matching: l0 loadbalancer get entrypoint* would return all jobs beginning with entrypoint .", + "title": "Additional information" + }, + { + "location": "/reference/cli/#loadbalancer-list", + "text": "Use the list subcommand to display information about all of the existing load balancers in an instance of Layer0.", + "title": "loadbalancer list" + }, + { + "location": "/reference/cli/#usage_24", + "text": "l0 loadbalancer list", + "title": "Usage" + }, + { + "location": "/reference/cli/#loadbalancer-healthcheck", + "text": "Use the healthcheck subcommand to display information about or update the configuration of a load balancer's health check.", + "title": "loadbalancer healthcheck" + }, + { + "location": "/reference/cli/#usage_25", + "text": "l0 loadbalancer healthcheck [--set-target target | --set-interval interval | \n --set-timeout timeout | --set-healthy-threshold healthyThreshold | \n --set-unhealthy-threshold unhealthyThreshold] loadbalancerName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_17", + "text": "loadBalancerName - The name of the existing Layer0 load balancer you are modifying.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_6", + "text": "--set-target target - The target of the check. Valid pattern is PROTOCOL:PORT/PATH . If PROTOCOL is HTTP or HTTPS , both PORT and PATH are required. Example: HTTP:80/admin/healthcheck . 
If PROTOCOL is TCP or SSL , PORT is required and PATH is not used. Example: TCP:80 --set-interval interval - The interval between health checks. --set-timeout timeout - The length of time in seconds before the health check times out. --set-healthy-threshold healthyThreshold - The number of checks before the instance is declared healthy. --set-unhealthy-threshold unhealthyThreshold - The number of checks before the instance is declared unhealthy.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_9", + "text": "Calling the subcommand without flags will display the current configuration of the load balancer's health check. Setting any of the flags will update the corresponding field in the health check, and all omitted flags will leave the corresponding fields unchanged.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#service", + "text": "A service is a component of a Layer0 environment. The purpose of a service is to execute a Docker image specified in a deploy . In order to create a service, you must first create an environment and a deploy ; in most cases, you should also create a load balancer before creating the service. The service command is used with the following subcommands: create , delete , get , update , list , logs , and scale .", + "title": "Service" + }, + { + "location": "/reference/cli/#service-create", + "text": "Use the create subcommand to create a Layer0 service.", + "title": "service create" + }, + { + "location": "/reference/cli/#usage_26", + "text": "l0 service create [--loadbalancer [environmentName:]loadBalancerName | \n --no-logs] environmentName serviceName deployName[:deployVersion]", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_18", + "text": "serviceName - A name for the service that you are creating. environmentName - The name of an existing Layer0 environment. deployName[:deployVersion] - The name of a Layer0 deploy that exists in the environment environmentName . You can optionally specify the version number of the Layer0 deploy that you want to deploy. If you do not specify a version number, the latest version of the deploy will be used.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_7", + "text": "--loadbalancer [environmentName:]loadBalancerName - Place the new service behind an existing load balancer loadBalancerName . You can optionally specify the Layer0 environment ( environmentName ) where the load balancer exists. --no-logs - Disable cloudwatch logging for the service", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#service-update", + "text": "Use the update subcommand to apply an existing Layer0 Deploy to an existing Layer0 service.", + "title": "service update" + }, + { + "location": "/reference/cli/#usage_27", + "text": "l0 service update [--no-logs] [environmentName:]serviceName deployName[:deployVersion]", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_19", + "text": "[environmentName:]serviceName - The name of an existing Layer0 service into which you want to apply the deploy. You can optionally specify the Layer0 environment ( environmentName ) of the service. deployName[:deployVersion] - The name of the Layer0 deploy that you want to apply to the service. You can optionally specify a specific version of the deploy ( deployVersion ). 
If you do not specify a version number, the latest version of the deploy will be applied.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_8", + "text": "--no-logs - Disable cloudwatch logging for the service", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_10", + "text": "If your service uses a load balancer, when you update the task definition for the service, the container name and container port that were specified when the service was created must remain the same in the task definition. In other words, if your service has a load balancer, you cannot apply any deploy you want to that service. If you are varying the container name or exposed ports, you must create a new service instead.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#service-delete", + "text": "Use the delete subcommand to delete an existing Layer0 service.", + "title": "service delete" + }, + { + "location": "/reference/cli/#usage_28", + "text": "l0 service delete [--wait] [environmentName:]serviceName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_20", + "text": "[environmentName:]serviceName - The name of the Layer0 service that you want to delete. You can optionally provide the Layer0 environment ( environmentName ) of the service.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_9", + "text": "--wait - Wait until the deletion is complete before exiting.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_11", + "text": "This operation performs several tasks asynchronously. When run without the --wait option, this operation will most likely exit before all of these tasks are complete; when run with the --wait option, this operation will only exit once these tasks have completed.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#service-get", + "text": "Use the get subcommand to display information about an existing Layer0 service.", + "title": "service get" + }, + { + "location": "/reference/cli/#usage_29", + "text": "l0 service get [environmentName:]serviceName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_21", + "text": "[environmentName:]serviceName - The name of an existing Layer0 service. You can optionally provide the Layer0 environment ( environmentName ) of the service.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#service-list", + "text": "Use the list subcommand to list all of the existing services in your Layer0 instance.", + "title": "service list" + }, + { + "location": "/reference/cli/#usage_30", + "text": "l0 service list", + "title": "Usage" + }, + { + "location": "/reference/cli/#service-logs", + "text": "Use the logs subcommand to display the logs from a Layer0 service that is currently running.", + "title": "service logs" + }, + { + "location": "/reference/cli/#usage_31", + "text": "l0 service logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] serviceName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_22", + "text": "serviceName - The name of the Layer0 service for which you want to view logs.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_10", + "text": "--start MM/DD HH:MM - The start of the time range to fetch logs. 
--end MM/DD HH:MM - The end of the time range to fetch logs. --tail=N - Display only the last N lines of the log.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#service-scale", + "text": "Use the scale subcommand to specify how many copies of an existing Layer0 service should run.", + "title": "service scale" + }, + { + "location": "/reference/cli/#usage_32", + "text": "l0 service scale [environmentName:]serviceName copies", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_23", + "text": "[environmentName:]serviceName - The name of the Layer0 service that you want to scale up. You can optionally provide the Layer0 environment ( environmentName ) of the service. copies - The number of copies of the specified service that should be run.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#task", + "text": "A Layer0 task is a component of an environment. A task executes the contents of a Docker image, as specified in a deploy. A task differs from a service in that a task does not restart after exiting. Additionally, ports are not exposed when using a task. The task command is used with the following subcommands: create , delete , get , list , and logs .", + "title": "Task" + }, + { + "location": "/reference/cli/#task-create", + "text": "Use the create subcommand to create a Layer0 task.", + "title": "task create" + }, + { + "location": "/reference/cli/#usage_33", + "text": "l0 task create [--copies copies | --no-logs] environmentName taskName deployName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_24", + "text": "environmentName - The name of the existing Layer0 environment in which you want to create the task. taskName - A name for the task. deployName - The name of an existing Layer0 deploy that the task should use.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_11", + "text": "--copies copies - The number of copies of the task to run (default: 1). --no-logs - Disable cloudwatch logging for the task.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#task-delete", + "text": "Use the delete subcommand to delete an existing Layer0 task.", + "title": "task delete" + }, + { + "location": "/reference/cli/#usage_34", + "text": "l0 task delete [environmentName:]taskName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_25", + "text": "[environmentName:]taskName - The name of the Layer0 task that you want to delete. You can optionally specify the name of the Layer0 environment that contains the task. This parameter is only required if multiple environments contain tasks with exactly the same name.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_12", + "text": "Until the record has been purged, the API may indicate that the task is still running. Task records are typically purged within an hour.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#task-get", + "text": "Use the get subcommand to display information about an existing Layer0 task ( taskName ).", + "title": "task get" + }, + { + "location": "/reference/cli/#usage_35", + "text": "l0 task get [environmentName:]taskName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_26", + "text": "[environmentName:]taskName - The name of a Layer0 task for which you want to see information. 
You can optionally specify the name of the Layer0 Environment that contains the task.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#additional-information_13", + "text": "The value of taskName does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in taskName , then information about all matching tasks will be returned.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#task-list", + "text": "Use the list subcommand to display a list of running tasks in your Layer0.", + "title": "task list" + }, + { + "location": "/reference/cli/#usage_36", + "text": "l0 task list", + "title": "Usage" + }, + { + "location": "/reference/cli/#task-logs", + "text": "Use the logs subcommand to display logs for a running Layer0 task.", + "title": "task logs" + }, + { + "location": "/reference/cli/#usage_37", + "text": "l0 task logs [--start MM/DD HH:MM | --end MM/DD HH:MM | --tail=N] taskName", + "title": "Usage" + }, + { + "location": "/reference/cli/#required-parameters_27", + "text": "taskName - The name of an existing Layer0 task.", + "title": "Required parameters" + }, + { + "location": "/reference/cli/#optional-arguments_12", + "text": "--start MM/DD HH:MM - The start of the time range to fetch logs. --end MM/DD HH:MM - The end of the time range to fetch logs. --tail=N - Display only the last N lines of the log.", + "title": "Optional arguments" + }, + { + "location": "/reference/cli/#additional-information_14", + "text": "The value of taskName does not need to exactly match the name of an existing task. If multiple results are found that match the pattern you specified in taskName , then information about all matching tasks will be returned.", + "title": "Additional information" + }, + { + "location": "/reference/cli/#task-list_1", + "text": "Use the list subcommand to display a list of running tasks in your Layer0.", + "title": "task list" + }, + { + "location": "/reference/cli/#usage_38", + "text": "l0 task list", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/", + "text": "Layer0 Setup Reference\n#\n\n\nThe Layer0 Setup application (commonly called \nl0-setup\n) is used for administrative tasks on Layer0 instances.\n\n\nGlobal options\n#\n\n\nl0-setup\n can be used with one of several commands: \ninit\n, \nplan\n, \napply\n, \nlist\n, \npush\n, \npull\n, \nendpoint\n, \ndestroy\n, \nupgrade\n, and \nset\n. These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using \nl0-setup\n\n\nUsage\n#\n\n\nl0\n-\nsetup\n \n[\nglobal\n \noptions\n]\n \ncommand\n \n[\ncommand\n \noptions\n]\n \nparams\n\n\n\n\n\n\nGlobal options\n#\n\n\n\n\n-l value, --log value\n - The log level to display on the console when you run commands. (default: info)\n\n\n--version\n - Display the version number of the \nl0-setup\n application.\n\n\n\n\n\n\nInit\n#\n\n\nThe \ninit\n command is used to initialize or reconfigure a Layer0 instance. \nThis command will prompt the user for inputs required to create/update a Layer0 instance. \nEach of the inputs can be specified through an optional flag.\n\n\nUsage\n#\n\n\nl0-setup init [--docker-path path | --module-source path | \n --version version | --aws-region region | --aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--docker-path\n - Path to docker config.json file. 
This is used to include private Docker Registry authentication for this Layer0 instance.\n\n\n--module-source\n - The source input variable is the path to the Terraform Layer0. By default, this points to the Layer0 github repository. Using values other than the default may result in undesired consequences.\n\n\n--version\n - The version input variable specifies the tag to use for the Layer0 Docker images: \nquintilesims/l0-api\n and \nquintilesims/l0-runner\n.\n\n\n--aws-ssh-key-pair\n - The ssh_key_pair input variable specifies the name of the ssh key pair to include in EC2 instances provisioned by Layer0. This key pair must already exist in the AWS account. The names of existing key pairs can be found in the EC2 dashboard.\n\n\n--aws-access-key\n - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n--aws-secret-key\n - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n\n\n\n\nPlan\n#\n\n\nThe \nplan\n command is used to show the planned operation(s) to run during the next \napply\n on a Layer0 instance without actually executing any actions\n\n\nUsage\n#\n\n\nl0-setup plan instanceName\n\n\n\n\n\n\n\nApply\n#\n\n\nThe \napply\n command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the layer0 configuration to an S3 bucket unless the \n--push=false\n flag is set to false. Pushing the configuration to an S3 bucket requires aws credentials which if not set via the optional \n--aws-*\n flags, are read from the environment variables or a credentials file. \n\n\nUsage\n#\n\n\nl0-setup apply [--quick | --push=false | --aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--quick\n - Skips verification checks that normally run after \nterraform apply\n has completed\n\n\n--push=false\n - Skips uploading local Layer0 configuration files to an S3 bucket\n\n\n--aws-access-key\n - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n--aws-secret-key\n - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n\n\n\n\nList\n#\n\n\nThe \nlist\n command is used to list local and remote Layer0 instances.\n\n\nUsage\n#\n\n\nl0-setup list [--local=false | --remote=false | --aws-access-key accessKey | \n --aws-secret-key secretKey]\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n-l, --local\n - Show local Layer0 instances. This value is true by default.\n\n\n-r, --remote\n - Show remote Layer0 instances. This value is true by default. 
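For example (an illustrative addition that uses only the flags documented above; not part of the original reference), listing just the remote Layer0 instances would look like:

l0-setup list --local=false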
\n\n\n\n\n\nPush\n#\n\n\nThe \npush\n command is used to back up your Layer0 configuration files to an S3 bucket.\n\n\nUsage\n#\n\n\nl0-setup push [--aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--aws-access-key\n - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n--aws-secret-key\n - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n\n\n\n\nPull\n#\n\n\nThe \npull\n command is used to copy Layer0 configuration files from an S3 bucket.\n\n\nUsage\n#\n\n\nl0-setup pull [--aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--aws-access-key\n - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n--aws-secret-key\n - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the \nAdministratorAccess\n policy.\n\n\n\n\n\n\nEndpoint\n#\n\n\nThe \nendpoint\n command is used to show environment variables used to connect to a Layer0 instance.\n\n\nUsage\n#\n\n\nl0-setup endpoint [-i | -d | -s syntax] instanceName\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n-i, --insecure\n - Show environment variables that allow for insecure settings\n\n\n-d, --dev\n - Show environment variables that are required for local development\n\n\n-s --syntax\n - Choose the syntax to display environment variables \n(choices: \nbash\n, \ncmd\n, \npowershell\n) (default: \nbash\n)\n\n\n\n\n\n\nDestroy\n#\n\n\nThe \ndestroy\n command is used to destroy all resources associated with a Layer0 instance.\n\n\n\n\nCaution\n\n\nDestroying a Layer0 instance cannot be undone. If you created backups of your Layer0 configuration using the \npush\n command, those backups will also be deleted when you run the \ndestroy\n command.\n\n\n\n\nUsage\n#\n\n\nl0-setup destroy [--force] instanceName\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--force\n - Skips confirmation prompt\n\n\n\n\n\n\nUpgrade\n#\n\n\nThe \nupgrade\n command is used to upgrade a Layer0 instance to a new version.\nYou will need to run an \napply\n after this command has completed. \n\n\nUsage\n#\n\n\nl0-setup upgrade [--force] instanceName version\n\n\n\n\n\nOptional arguments\n#\n\n\n\n\n--force\n - Skips confirmation prompt\n\n\n\n\n\n\nSet\n#\n\n\nThe \nset\n command is used to set input variable(s) for a Layer0 instance's Terraform module.\nThis command can be used to shorthand the \ninit\n and \nupgrade\n commands, and can also be used with custom Layer0 modules. \nYou will need to run an \napply\n after this command has completed. 
\n\n\nUsage\n#\n\n\nl0-setup set [--input key=value] instanceName\n\n\n\n\n\nOptions\n#\n\n\n\n\n--input key=val\n - Specify an input using \nkey=val\n format\n\n\n\n\nExample Usage\n#\n\n\nl0-setup set --input username=admin --input password=pass123 mylayer0", + "title": "Layer0 Setup CLI" + }, + { + "location": "/reference/setup-cli/#layer0-setup-reference", + "text": "The Layer0 Setup application (commonly called l0-setup ) is used for administrative tasks on Layer0 instances.", + "title": "Layer0 Setup Reference" + }, + { + "location": "/reference/setup-cli/#global-options", + "text": "l0-setup can be used with one of several commands: init , plan , apply , list , push , pull , endpoint , destroy , upgrade , and set . These commands are detailed in the sections below. There are, however, some global parameters that you may specify whenever using l0-setup", + "title": "Global options" + }, + { + "location": "/reference/setup-cli/#usage", + "text": "l0 - setup [ global options ] command [ command options ] params", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#global-options_1", + "text": "-l value, --log value - The log level to display on the console when you run commands. (default: info) --version - Display the version number of the l0-setup application.", + "title": "Global options" + }, + { + "location": "/reference/setup-cli/#init", + "text": "The init command is used to initialize or reconfigure a Layer0 instance. \nThis command will prompt the user for inputs required to create/update a Layer0 instance. \nEach of the inputs can be specified through an optional flag.", + "title": "Init" + }, + { + "location": "/reference/setup-cli/#usage_1", + "text": "l0-setup init [--docker-path path | --module-source path | \n --version version | --aws-region region | --aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments", + "text": "--docker-path - Path to docker config.json file. This is used to include private Docker Registry authentication for this Layer0 instance. --module-source - The source input variable is the path to the Terraform Layer0. By default, this points to the Layer0 github repository. Using values other than the default may result in undesired consequences. --version - The version input variable specifies the tag to use for the Layer0 Docker images: quintilesims/l0-api and quintilesims/l0-runner . --aws-ssh-key-pair - The ssh_key_pair input variable specifies the name of the ssh key pair to include in EC2 instances provisioned by Layer0. This key pair must already exist in the AWS account. The names of existing key pairs can be found in the EC2 dashboard. --aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy. --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. 
It is recommended this key has the AdministratorAccess policy.", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#plan", + "text": "The plan command is used to show the planned operation(s) to run during the next apply on a Layer0 instance without actually executing any actions", + "title": "Plan" + }, + { + "location": "/reference/setup-cli/#usage_2", + "text": "l0-setup plan instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#apply", + "text": "The apply command is used to create and update Layer0 instances. Note that the default behavior of apply is to push the layer0 configuration to an S3 bucket unless the --push=false flag is set to false. Pushing the configuration to an S3 bucket requires aws credentials which if not set via the optional --aws-* flags, are read from the environment variables or a credentials file.", + "title": "Apply" + }, + { + "location": "/reference/setup-cli/#usage_3", + "text": "l0-setup apply [--quick | --push=false | --aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_1", + "text": "--quick - Skips verification checks that normally run after terraform apply has completed --push=false - Skips uploading local Layer0 configuration files to an S3 bucket --aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy. --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#list", + "text": "The list command is used to list local and remote Layer0 instances.", + "title": "List" + }, + { + "location": "/reference/setup-cli/#usage_4", + "text": "l0-setup list [--local=false | --remote=false | --aws-access-key accessKey | \n --aws-secret-key secretKey]", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_2", + "text": "-l, --local - Show local Layer0 instances. This value is true by default. -r, --remote - Show remote Layer0 instances. This value is true by default.", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#push", + "text": "The push command is used to back up your Layer0 configuration files to an S3 bucket.", + "title": "Push" + }, + { + "location": "/reference/setup-cli/#usage_5", + "text": "l0-setup push [--aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_3", + "text": "--aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy. --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. 
It is recommended this key has the AdministratorAccess policy.", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#pull", + "text": "The pull command is used to copy Layer0 configuration files from an S3 bucket.", + "title": "Pull" + }, + { + "location": "/reference/setup-cli/#usage_6", + "text": "l0-setup pull [--aws-access-key accessKey | \n --aws-secret-key secretKey] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_4", + "text": "--aws-access-key - The access_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy. --aws-secret-key - The secret_key input variable is used to provision the AWS resources required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key. It is recommended this key has the AdministratorAccess policy.", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#endpoint", + "text": "The endpoint command is used to show environment variables used to connect to a Layer0 instance.", + "title": "Endpoint" + }, + { + "location": "/reference/setup-cli/#usage_7", + "text": "l0-setup endpoint [-i | -d | -s syntax] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_5", + "text": "-i, --insecure - Show environment variables that allow for insecure settings -d, --dev - Show environment variables that are required for local development -s --syntax - Choose the syntax to display environment variables \n(choices: bash , cmd , powershell ) (default: bash )", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#destroy", + "text": "The destroy command is used to destroy all resources associated with a Layer0 instance. Caution Destroying a Layer0 instance cannot be undone. If you created backups of your Layer0 configuration using the push command, those backups will also be deleted when you run the destroy command.", + "title": "Destroy" + }, + { + "location": "/reference/setup-cli/#usage_8", + "text": "l0-setup destroy [--force] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_6", + "text": "--force - Skips confirmation prompt", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#upgrade", + "text": "The upgrade command is used to upgrade a Layer0 instance to a new version.\nYou will need to run an apply after this command has completed.", + "title": "Upgrade" + }, + { + "location": "/reference/setup-cli/#usage_9", + "text": "l0-setup upgrade [--force] instanceName version", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#optional-arguments_7", + "text": "--force - Skips confirmation prompt", + "title": "Optional arguments" + }, + { + "location": "/reference/setup-cli/#set", + "text": "The set command is used to set input variable(s) for a Layer0 instance's Terraform module.\nThis command can be used to shorthand the init and upgrade commands, and can also be used with custom Layer0 modules. 
\nYou will need to run an apply after this command has completed.", + "title": "Set" + }, + { + "location": "/reference/setup-cli/#usage_10", + "text": "l0-setup set [--input key=value] instanceName", + "title": "Usage" + }, + { + "location": "/reference/setup-cli/#options", + "text": "--input key=val - Specify an input using key=val format", + "title": "Options" + }, + { + "location": "/reference/setup-cli/#example-usage", + "text": "l0-setup set --input username=admin --input password=pass123 mylayer0", + "title": "Example Usage" + }, + { + "location": "/reference/terraform_introduction/", + "text": "Introduction to Terraform\n#\n\n\nWhat does Terraform do?\n#\n\n\nTerraform is a powerful orchestration tool for creating, updating, deleting, and otherwise managing infrastructure in an easy-to-understand, declarative manner.\nTerraform's \ndocumentation\n is very good, but at a glance:\n\n\nBe Declarative -\n\nSpecify desired infrastructure results in Terraform (\n*.tf\n) files, and let Terraform do the heavy work of figuring out how to make that specification a reality.\n\n\nScry the Future -\n\nUse \nterraform plan\n to see a list of everything that Terraform \nwould\n do without actually making those changes.\n\n\nVersion Infrastructure -\n\nCheck Terraform files into a VCS to track changes to and manage versions of your infrastructure.\n\n\nWhy Terraform?\n#\n\n\nWhy did we latch onto Terraform instead of something like CloudFormation?\n\n\nCloud-Agnostic -\n\nUnlike CloudFormation, Terraform is able to incorporate different \nresource providers\n to manage infrastructure across multiple cloud services (not just AWS).\n\n\nCustom Providers -\n\nTerraform can be extended to manage tools that don't come natively through use of custom providers.\nWe wrote a \nLayer0 provider\n so that Terraform can manage Layer0 resources in addition to tools and resources and infrastructure beyond Layer0's scope.\n\n\nTerraform has some \nthings to say\n on the matter as well.\n\n\nAdvantages Versus Layer0 CLI?\n#\n\n\nWhy should you move from using (or scripting) the Layer0 CLI directly?\n\n\nReduce Fat-Fingering Mistakes -\n\nCreating Terraform files (and using \nterraform plan\n) allows you to review your deployment and catch errors.\nExecuting Layer0 CLI commands one-by-one is tiresome, non-transportable, and a process ripe for typos.\n\n\nGo Beyond Layer0 -\n\nRetain the benefits of leveraging Layer0's concepts and resources using our \nprovider\n, but also gain the ability to orchestrate resources and tools beyond the CLI's scope.\n\n\nHow do I get Terraform?\n#\n\n\nCheck out Terraform's \ndocumentation\n on the subject.", + "title": "Terraform" + }, + { + "location": "/reference/terraform_introduction/#introduction-to-terraform", + "text": "", + "title": "Introduction to Terraform" + }, + { + "location": "/reference/terraform_introduction/#what-does-terraform-do", + "text": "Terraform is a powerful orchestration tool for creating, updating, deleting, and otherwise managing infrastructure in an easy-to-understand, declarative manner.\nTerraform's documentation is very good, but at a glance: Be Declarative - \nSpecify desired infrastructure results in Terraform ( *.tf ) files, and let Terraform do the heavy work of figuring out how to make that specification a reality. Scry the Future - \nUse terraform plan to see a list of everything that Terraform would do without actually making those changes. 
Version Infrastructure - \nCheck Terraform files into a VCS to track changes to and manage versions of your infrastructure.", + "title": "What does Terraform do?" + }, + { + "location": "/reference/terraform_introduction/#why-terraform", + "text": "Why did we latch onto Terraform instead of something like CloudFormation? Cloud-Agnostic - \nUnlike CloudFormation, Terraform is able to incorporate different resource providers to manage infrastructure across multiple cloud services (not just AWS). Custom Providers - \nTerraform can be extended to manage tools that don't come natively through use of custom providers.\nWe wrote a Layer0 provider so that Terraform can manage Layer0 resources in addition to tools and resources and infrastructure beyond Layer0's scope. Terraform has some things to say on the matter as well.", + "title": "Why Terraform?" + }, + { + "location": "/reference/terraform_introduction/#advantages-versus-layer0-cli", + "text": "Why should you move from using (or scripting) the Layer0 CLI directly? Reduce Fat-Fingering Mistakes - \nCreating Terraform files (and using terraform plan ) allows you to review your deployment and catch errors.\nExecuting Layer0 CLI commands one-by-one is tiresome, non-transportable, and a process ripe for typos. Go Beyond Layer0 - \nRetain the benefits of leveraging Layer0's concepts and resources using our provider , but also gain the ability to orchestrate resources and tools beyond the CLI's scope.", + "title": "Advantages Versus Layer0 CLI?" + }, + { + "location": "/reference/terraform_introduction/#how-do-i-get-terraform", + "text": "Check out Terraform's documentation on the subject.", + "title": "How do I get Terraform?" + }, + { + "location": "/reference/terraform-plugin/", + "text": "Layer0 Terraform Provider Reference\n#\n\n\nTerraform is an open-source tool for provisioning and managing infrastructure.\nIf you are new to Terraform, we recommend checking out their \ndocumentation\n.\n\n\nLayer0 has built a custom \nprovider\n for Layer0.\nThis provider allows users to create, manage, and update Layer0 entities using Terraform.\n\n\nPrerequisites\n#\n\n\n\n\nTerraform v0.11+\n (\ndownload\n), accessible in your system path.\n\n\n\n\nInstall\n#\n\n\nDownload a Layer0 v0.8.4+ \nrelease\n.\nThe Terraform plugin binary is located in the release zip file as \nterraform-provider-layer0\n.\nCopy this \nterraform-provider-layer0\n binary into the same directory as your Terraform binary - and you're done!\n\n\nFor further information, see Terraform's documentation on installing a Terraform plugin \nhere\n.\n\n\nGetting Started\n#\n\n\n\n\nCheckout the \nTerraform\n section of the Guestbook walkthrough \nhere\n.\n\n\nWe've added some tips and links to helpful resources in the \nBest Practices\n section below.\n\n\n\n\n\n\nProvider\n#\n\n\nThe Layer0 provider is used to interact with a Layer0 API.\nThe provider needs to be configured with the proper credentials before it can be used.\n\n\nExample Usage\n#\n\n\n# Add \nendpoint\n and \ntoken\n variables\nvariable \nendpoint\n {}\n\nvariable \ntoken\n {}\n\n# Configure the layer0 provider\nprovider \nlayer0\n {\n endpoint = \n${\nvar\n.\nendpoint\n}\n\n token = \n${\nvar\n.\ntoken\n}\n\n skip_ssl_verify = true\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nNote\n\n\nThe \nendpoint\n and \ntoken\n variables for your layer0 api can be found using the \nl0-setup endpoint\n command\n\n\n\n\n\n\nendpoint\n - (Required) The endpoint of the layer0 api\n\n\ntoken\n - 
(Required) The authentication token for the layer0 api\n\n\nskip_ssl_verify\n - (Optional) If true, ssl certificate mismatch warnings will be ignored\n\n\n\n\n\n\nAPI Data Source\n#\n\n\nThe API data source is used to extract useful read-only variables from the Layer0 API.\n\n\nExample Usage\n#\n\n\n# Configure the api data source\ndata \nlayer0_api\n \nconfig\n {}\n\n# Output the layer0 vpc id\noutput \nvpc id\n {\n val = \n${\ndata\n.\nlayer0_api\n.\nconfig\n.\nvpc_id\n}\n\n}\n\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nprefix\n - The prefix of the layer0 instance\n\n\nvpc_id\n - The vpc id of the layer0 instance\n\n\npublic_subnets\n - A list containing the 2 public subnet ids in the layer0 vpc\n\n\nprivate_subnets\n - A list containing the 2 private subnet ids in the layer0 vpc\n\n\n\n\n\n\nDeploy Data Source\n#\n\n\nThe Deploy data source is used to extract Layer0 Deploy attributes.\n\n\nExample Usage\n#\n\n\n# Configure the deploy data source\ndata \nlayer0_deploy\n \ndpl\n {\n name = \nmy-deploy\n\n version = \n1\n\n}\n\n# Output the layer0 deploy id\noutput \ndeploy_id\n {\n val = \n${\ndata\n.\nlayer0_deploy\n.\ndpl\n.\nid\n}\n\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the deploy\n\n\nversion\n - (Required) The version of the deploy\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nname\n - The name of the deploy\n\n\nversion\n - The version of the deploy\n\n\nid\n - The id of the deploy\n\n\n\n\n\n\nEnvironment Data Source\n#\n\n\nThe Environment data source is used to extract Layer0 Environment attributes.\n\n\nExample Usage\n#\n\n\n# Configure the environment data source\ndata \nlayer0_environment\n \nenv\n {\n name = \nmy-environment\n\n}\n\n# Output the layer0 environment id\noutput \nenvironment_id\n {\n val = \n${\ndata\n.\nlayer0_environment\n.\nenv\n.\nid\n}\n\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the environment\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the environment\n\n\nname\n - The name of the environment\n\n\nsize\n - The size of the instances in the environment\n\n\nmin_count\n - The current number instances in the environment\n\n\nos\n - The operating system used for the environment\n\n\nami\n - The AMI ID used for the environment\n\n\n\n\n\n\nLoad Balancer Data Source\n#\n\n\nThe Load Balancer data source is used to extract Layer0 Load Balancer attributes.\n\n\nExample Usage\n#\n\n\n# Configure the load balancer source\ndata \nlayer0_load_balancer\n \nlb\n {\n name = \nmy-loadbalancer\n\n environment_id = \n${\ndata\n.\nlayer0_environment\n.\nenv\n.\nenvironment_id\n}\n\n}\n\n# Output the layer0 load balancer id\noutput \nload_balancer_id\n {\n val = \n${\ndata\n.\nlayer0_load_balancer\n.\nlb\n.\nid\n}\n\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (required) The name of the load balancer\n\n\nenvironment_id\n - (required) The id of the environment the load balancer exists in\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the load balancer\n\n\nname\n - The name of the load balancer\n\n\nenvironment_id\n - The id of the environment the load balancer exists in\n\n\nenvironment_name\n - The name of the environment the load balancer exists in\n\n\nprivate\n - 
Whether or not the load balancer is private\n\n\nurl\n - The URL of the load balancer\n\n\n\n\n\n\nService Data Source\n#\n\n\nThe Service data source is used to extract Layer0 Service attributes.\n\n\nExample Usage\n#\n\n\n# Configure the service data source\ndata \nlayer0_service\n \nsvc\n {\n name = \nmy-service\n\n environment_id = \n${\ndata\n.\nlayer0_environment\n.\nenv\n.\nenvironment_id\n}\n\n}\n\n# Output the layer0 service id\noutput \nservice_id\n {\n val = \n${\ndata\n.\nlayer0_service\n.\nsvc\n.\nid\n}\n\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (required) The name of the service\n\n\nenvironment_id\n - (required) The id of the environment the service exists in\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the service\n\n\nname\n - The name of the service\n\n\nenvironment_id\n - The id of the environment the service exists in\n\n\nenvironment_name\n - The name of the environment the service exists in\n\n\nscale\n - The current desired scale of the service\n\n\n\n\n\n\nDeploy Resource\n#\n\n\nProvides a Layer0 Deploy.\n\n\nPerforming variable substitution inside of your deploy's json file (typically named \nDockerrun.aws.json\n) can be done through Terraform's \ntemplate_file\n.\nFor a working example, please see the sample \nGuestbook\n application\n\n\nExample Usage\n#\n\n\n# Configure the deploy template\ndata \ntemplate_file\n \nguestbook\n {\n template = \n${\nfile\n(\nDockerrun.aws.json\n)\n}\n\n vars {\n docker_image_tag = \nlatest\n\n }\n}\n\n# Create a deploy using the rendered template\nresource \nlayer0_deploy\n \nguestbook\n {\n name = \nguestbook\n\n content = \n${\ndata\n.\ntemplate_file\n.\nguestbook\n.\nrendered\n}\n\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the deploy\n\n\ncontent\n - (Required) The content of the deploy\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the deploy\n\n\nname\n - The name of the deploy\n\n\nversion\n - The version number of the deploy\n\n\n\n\n\n\nEnvironment Resource\n#\n\n\nProvides a Layer0 Environment\n\n\nExample Usage\n#\n\n\n# Create a new environment\nresource \nlayer0_environment\n \ndemo\n {\n name = \ndemo\n\n size = \nm3.medium\n\n min_count = 0\n user_data = \necho hello, world\n\n os = \nlinux\n\n ami = \nami123\n\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the environment\n\n\nsize\n - (Optional, Default: \"m3.medium\") The size of the instances in the environment.\nAvailable instance sizes can be found \nhere\n\n\nmin_count\n - (Optional, Default: 0) The minimum number of instances allowed in the environment\n\n\nuser-data\n - (Optional) The user data template to use for the environment's autoscaling group.\nSee the \ncli reference\n for the default template.\n\n\nos\n - (Optional, Default: \"linux\") Specifies the type of operating system used in the environment.\nOptions are \"linux\" or \"windows\".\n\n\nami\n - (Optional) A custom AMI ID to use in the environment. 
\nIf not specified, Layer0 will use its default AMI ID for the specified operating system.\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the environment\n\n\nname\n - The name of the environment\n\n\nsize\n - The size of the instances in the environment\n\n\ncluster_count\n - The current number instances in the environment\n\n\nsecurity_group_id\n - The ID of the environment's security group\n\n\nos\n - The operating system used for the environment\n\n\nami\n - The AMI ID used for the environment\n\n\n\n\n\n\nLoad Balancer Resource\n#\n\n\nProvides a Layer0 Load Balancer\n\n\nExample Usage\n#\n\n\n# Create a new load balancer\nresource \nlayer0_load_balancer\n \nguestbook\n {\n name = \nguestbook\n\n environment = \ndemo123\n\n private = false\n\n port {\n host_port = 80\n container_port = 80\n protocol = \nhttp\n\n }\n\n port {\n host_port = 443\n container_port = 443\n protocol = \nhttps\n\n certificate = \ncert\n\n }\n\n health_check {\n target = \ntcp:80\n\n interval = 30\n timeout = 5\n healthy_threshold = 2\n unhealthy_threshold = 2\n }\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the load balancer\n\n\nenvironment\n - (Required) The id of the environment to place the load balancer inside of\n\n\nprivate\n - (Optional) If true, the load balancer will not be exposed to the public internet\n\n\nport\n - (Optional, Default: 80:80/tcp) A list of port blocks. Ports documented below\n\n\nhealth_check\n - (Optional, Default: \n{\"TCP:80\" 30 5 2 2}\n) A health_check block. Health check documented below\n\n\n\n\nPorts (\nport\n) support the following:\n\n\n\n\nhost_port\n - (Required) The port on the load balancer to listen on\n\n\ncontainer_port\n - (Required) The port on the docker container to route to\n\n\nprotocol\n - (Required) The protocol to listen on. Valid values are \nHTTP, HTTPS, TCP, or SSL\n\n\ncertificate\n - (Optional) The name of an SSL certificate. Only required if the \nHTTP\n or \nSSL\n protocol is used.\n\n\n\n\nHealthcheck (\nhealth_check\n) supports the following:\n\n\n\n\ntarget\n - (Required) The target of the check. 
Valid pattern is \nPROTOCOL:PORT/PATH\n, where \nPROTOCOL\n values are:\n\n\nHTTP\n, \nHTTPS\n - \nPORT\n and \nPATH\n are required\n\n\nTCP\n, \nSSL\n - \nPORT\n is required, \nPATH\n is not supported\n\n\n\n\n\n\ninterval\n - (Required) The interval between checks.\n\n\ntimeout\n - (Required) The length of time before the check times out.\n\n\nhealthy_threshold\n - (Required) The number of checks before the instance is declared healthy.\n\n\nunhealthy_threshold\n - (Required) The number of checks before the instance is declared unhealthy.\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the load balancer\n\n\nname\n - The name of the load balancer\n\n\nenvironment\n - The id of the environment the load balancer exists in\n\n\nprivate\n - Whether or not the load balancer is private\n\n\nurl\n - The URL of the load balancer\n\n\n\n\n\n\nService Resource\n#\n\n\nProvides a Layer0 Service\n\n\nExample Usage\n#\n\n\n# Create a new service\nresource \nlayer0_service\n \nguestbook\n {\n name = \nguestbook\n\n environment = \nenvironment123\n\n deploy = \ndeploy123\n\n load_balancer = \nloadbalancer123\n\n scale = 3\n}\n\n\n\n\n\nArgument Reference\n#\n\n\nThe following arguments are supported:\n\n\n\n\nname\n - (Required) The name of the service\n\n\nenvironment\n - (Required) The id of the environment to place the service inside of\n\n\ndeploy\n - (Required) The id of the deploy for the service to run\n\n\nload_balancer\n (Optional) The id of the load balancer to place the service behind\n\n\nscale\n (Optional, Default: 1) The number of copies of the service to run\n\n\n\n\nAttribute Reference\n#\n\n\nThe following attributes are exported:\n\n\n\n\nid\n - The id of the service\n\n\nname\n - The name of the service\n\n\nenvironment\n - The id of the environment the service exists in\n\n\ndeploy\n - The id of the deploy the service is running\n\n\nload_balancer\n - The id of the load balancer the service is behind (if \nload_balancer\n was set)\n\n\nscale\n - The current desired scale of the service\n\n\n\n\n\n\nBest Practices\n#\n\n\n\n\nAlways run \nTerraform plan\n before \nterraform apply\n.\nThis will show you what action(s) Terraform plans to make before actually executing them.\n\n\nUse \nvariables\n to reference secrets.\nSecrets can be placed in a file named \nterraform.tfvars\n, or by setting \nTF_VAR_*\n environment variables.\nMore information can be found \nhere\n.\n\n\nUse Terraform's \nremote\n command to backup and sync your \nterraform.tfstate\n file across different members in your organization.\nTerraform has documentation for using S3 as a backend \nhere\n.\n\n\nTerraform \nmodules\n allow you to define and consume reusable components.\n\n\nExample configurations can be found \nhere", + "title": "Layer0 Terraform Plugin" + }, + { + "location": "/reference/terraform-plugin/#layer0-terraform-provider-reference", + "text": "Terraform is an open-source tool for provisioning and managing infrastructure.\nIf you are new to Terraform, we recommend checking out their documentation . 
Layer0 has built a custom provider for Layer0.\nThis provider allows users to create, manage, and update Layer0 entities using Terraform.", + "title": "Layer0 Terraform Provider Reference" + }, + { + "location": "/reference/terraform-plugin/#prerequisites", + "text": "Terraform v0.11+ ( download ), accessible in your system path.", + "title": "Prerequisites" + }, + { + "location": "/reference/terraform-plugin/#install", + "text": "Download a Layer0 v0.8.4+ release .\nThe Terraform plugin binary is located in the release zip file as terraform-provider-layer0 .\nCopy this terraform-provider-layer0 binary into the same directory as your Terraform binary - and you're done! For further information, see Terraform's documentation on installing a Terraform plugin here .", + "title": "Install" + }, + { + "location": "/reference/terraform-plugin/#getting-started", + "text": "Checkout the Terraform section of the Guestbook walkthrough here . We've added some tips and links to helpful resources in the Best Practices section below.", + "title": "Getting Started" + }, + { + "location": "/reference/terraform-plugin/#provider", + "text": "The Layer0 provider is used to interact with a Layer0 API.\nThe provider needs to be configured with the proper credentials before it can be used.", + "title": "Provider" + }, + { + "location": "/reference/terraform-plugin/#example-usage", + "text": "# Add endpoint and token variables\nvariable endpoint {}\n\nvariable token {}\n\n# Configure the layer0 provider\nprovider layer0 {\n endpoint = ${ var . endpoint } \n token = ${ var . token } \n skip_ssl_verify = true\n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference", + "text": "The following arguments are supported: Note The endpoint and token variables for your layer0 api can be found using the l0-setup endpoint command endpoint - (Required) The endpoint of the layer0 api token - (Required) The authentication token for the layer0 api skip_ssl_verify - (Optional) If true, ssl certificate mismatch warnings will be ignored", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#api-data-source", + "text": "The API data source is used to extract useful read-only variables from the Layer0 API.", + "title": "API Data Source" + }, + { + "location": "/reference/terraform-plugin/#example-usage_1", + "text": "# Configure the api data source\ndata layer0_api config {}\n\n# Output the layer0 vpc id\noutput vpc id {\n val = ${ data . layer0_api . config . vpc_id } \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference", + "text": "The following attributes are exported: prefix - The prefix of the layer0 instance vpc_id - The vpc id of the layer0 instance public_subnets - A list containing the 2 public subnet ids in the layer0 vpc private_subnets - A list containing the 2 private subnet ids in the layer0 vpc", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#deploy-data-source", + "text": "The Deploy data source is used to extract Layer0 Deploy attributes.", + "title": "Deploy Data Source" + }, + { + "location": "/reference/terraform-plugin/#example-usage_2", + "text": "# Configure the deploy data source\ndata layer0_deploy dpl {\n name = my-deploy \n version = 1 \n}\n\n# Output the layer0 deploy id\noutput deploy_id {\n val = ${ data . layer0_deploy . dpl . 
id } \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_1", + "text": "The following arguments are supported: name - (Required) The name of the deploy version - (Required) The version of the deploy", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_1", + "text": "The following attributes are exported: name - The name of the deploy version - The version of the deploy id - The id of the deploy", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#environment-data-source", + "text": "The Environment data source is used to extract Layer0 Environment attributes.", + "title": "Environment Data Source" + }, + { + "location": "/reference/terraform-plugin/#example-usage_3", + "text": "# Configure the environment data source\ndata layer0_environment env {\n name = my-environment \n}\n\n# Output the layer0 environment id\noutput environment_id {\n val = ${ data . layer0_environment . env . id } \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_2", + "text": "The following arguments are supported: name - (Required) The name of the environment", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_2", + "text": "The following attributes are exported: id - The id of the environment name - The name of the environment size - The size of the instances in the environment min_count - The current number instances in the environment os - The operating system used for the environment ami - The AMI ID used for the environment", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#load-balancer-data-source", + "text": "The Load Balancer data source is used to extract Layer0 Load Balancer attributes.", + "title": "Load Balancer Data Source" + }, + { + "location": "/reference/terraform-plugin/#example-usage_4", + "text": "# Configure the load balancer source\ndata layer0_load_balancer lb {\n name = my-loadbalancer \n environment_id = ${ data . layer0_environment . env . environment_id } \n}\n\n# Output the layer0 load balancer id\noutput load_balancer_id {\n val = ${ data . layer0_load_balancer . lb . id } \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_3", + "text": "The following arguments are supported: name - (required) The name of the load balancer environment_id - (required) The id of the environment the load balancer exists in", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_3", + "text": "The following attributes are exported: id - The id of the load balancer name - The name of the load balancer environment_id - The id of the environment the load balancer exists in environment_name - The name of the environment the load balancer exists in private - Whether or not the load balancer is private url - The URL of the load balancer", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#service-data-source", + "text": "The Service data source is used to extract Layer0 Service attributes.", + "title": "Service Data Source" + }, + { + "location": "/reference/terraform-plugin/#example-usage_5", + "text": "# Configure the service data source\ndata layer0_service svc {\n name = my-service \n environment_id = ${ data . layer0_environment . env . 
environment_id } \n}\n\n# Output the layer0 service id\noutput service_id {\n val = ${ data . layer0_service . svc . id } \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_4", + "text": "The following arguments are supported: name - (required) The name of the service environment_id - (required) The id of the environment the service exists in", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_4", + "text": "The following attributes are exported: id - The id of the service name - The name of the service environment_id - The id of the environment the service exists in environment_name - The name of the environment the service exists in scale - The current desired scale of the service", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#deploy-resource", + "text": "Provides a Layer0 Deploy. Performing variable substitution inside of your deploy's json file (typically named Dockerrun.aws.json ) can be done through Terraform's template_file .\nFor a working example, please see the sample Guestbook application", + "title": "Deploy Resource" + }, + { + "location": "/reference/terraform-plugin/#example-usage_6", + "text": "# Configure the deploy template\ndata template_file guestbook {\n template = ${ file ( Dockerrun.aws.json ) } \n vars {\n docker_image_tag = latest \n }\n}\n\n# Create a deploy using the rendered template\nresource layer0_deploy guestbook {\n name = guestbook \n content = ${ data . template_file . guestbook . rendered } \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_5", + "text": "The following arguments are supported: name - (Required) The name of the deploy content - (Required) The content of the deploy", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_5", + "text": "The following attributes are exported: id - The id of the deploy name - The name of the deploy version - The version number of the deploy", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#environment-resource", + "text": "Provides a Layer0 Environment", + "title": "Environment Resource" + }, + { + "location": "/reference/terraform-plugin/#example-usage_7", + "text": "# Create a new environment\nresource layer0_environment demo {\n name = demo \n size = m3.medium \n min_count = 0\n user_data = echo hello, world \n os = linux \n ami = ami123 \n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_6", + "text": "The following arguments are supported: name - (Required) The name of the environment size - (Optional, Default: \"m3.medium\") The size of the instances in the environment.\nAvailable instance sizes can be found here min_count - (Optional, Default: 0) The minimum number of instances allowed in the environment user-data - (Optional) The user data template to use for the environment's autoscaling group.\nSee the cli reference for the default template. os - (Optional, Default: \"linux\") Specifies the type of operating system used in the environment.\nOptions are \"linux\" or \"windows\". ami - (Optional) A custom AMI ID to use in the environment. 
\nIf not specified, Layer0 will use its default AMI ID for the specified operating system.", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_6", + "text": "The following attributes are exported: id - The id of the environment name - The name of the environment size - The size of the instances in the environment cluster_count - The current number instances in the environment security_group_id - The ID of the environment's security group os - The operating system used for the environment ami - The AMI ID used for the environment", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#load-balancer-resource", + "text": "Provides a Layer0 Load Balancer", + "title": "Load Balancer Resource" + }, + { + "location": "/reference/terraform-plugin/#example-usage_8", + "text": "# Create a new load balancer\nresource layer0_load_balancer guestbook {\n name = guestbook \n environment = demo123 \n private = false\n\n port {\n host_port = 80\n container_port = 80\n protocol = http \n }\n\n port {\n host_port = 443\n container_port = 443\n protocol = https \n certificate = cert \n }\n\n health_check {\n target = tcp:80 \n interval = 30\n timeout = 5\n healthy_threshold = 2\n unhealthy_threshold = 2\n }\n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_7", + "text": "The following arguments are supported: name - (Required) The name of the load balancer environment - (Required) The id of the environment to place the load balancer inside of private - (Optional) If true, the load balancer will not be exposed to the public internet port - (Optional, Default: 80:80/tcp) A list of port blocks. Ports documented below health_check - (Optional, Default: {\"TCP:80\" 30 5 2 2} ) A health_check block. Health check documented below Ports ( port ) support the following: host_port - (Required) The port on the load balancer to listen on container_port - (Required) The port on the docker container to route to protocol - (Required) The protocol to listen on. Valid values are HTTP, HTTPS, TCP, or SSL certificate - (Optional) The name of an SSL certificate. Only required if the HTTP or SSL protocol is used. Healthcheck ( health_check ) supports the following: target - (Required) The target of the check. Valid pattern is PROTOCOL:PORT/PATH , where PROTOCOL values are: HTTP , HTTPS - PORT and PATH are required TCP , SSL - PORT is required, PATH is not supported interval - (Required) The interval between checks. timeout - (Required) The length of time before the check times out. healthy_threshold - (Required) The number of checks before the instance is declared healthy. 
unhealthy_threshold - (Required) The number of checks before the instance is declared unhealthy.", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_7", + "text": "The following attributes are exported: id - The id of the load balancer name - The name of the load balancer environment - The id of the environment the load balancer exists in private - Whether or not the load balancer is private url - The URL of the load balancer", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#service-resource", + "text": "Provides a Layer0 Service", + "title": "Service Resource" + }, + { + "location": "/reference/terraform-plugin/#example-usage_9", + "text": "# Create a new service\nresource layer0_service guestbook {\n name = guestbook \n environment = environment123 \n deploy = deploy123 \n load_balancer = loadbalancer123 \n scale = 3\n}", + "title": "Example Usage" + }, + { + "location": "/reference/terraform-plugin/#argument-reference_8", + "text": "The following arguments are supported: name - (Required) The name of the service environment - (Required) The id of the environment to place the service inside of deploy - (Required) The id of the deploy for the service to run load_balancer (Optional) The id of the load balancer to place the service behind scale (Optional, Default: 1) The number of copies of the service to run", + "title": "Argument Reference" + }, + { + "location": "/reference/terraform-plugin/#attribute-reference_8", + "text": "The following attributes are exported: id - The id of the service name - The name of the service environment - The id of the environment the service exists in deploy - The id of the deploy the service is running load_balancer - The id of the load balancer the service is behind (if load_balancer was set) scale - The current desired scale of the service", + "title": "Attribute Reference" + }, + { + "location": "/reference/terraform-plugin/#best-practices", + "text": "Always run Terraform plan before terraform apply .\nThis will show you what action(s) Terraform plans to make before actually executing them. Use variables to reference secrets.\nSecrets can be placed in a file named terraform.tfvars , or by setting TF_VAR_* environment variables.\nMore information can be found here . Use Terraform's remote command to backup and sync your terraform.tfstate file across different members in your organization.\nTerraform has documentation for using S3 as a backend here . Terraform modules allow you to define and consume reusable components. Example configurations can be found here", + "title": "Best Practices" + }, + { + "location": "/reference/updateservice/", + "text": "Updating a Layer0 service\n#\n\n\nThere are three methods of updating an existing Layer0 service. The first method is to update the existing Deploy to refer to a new Docker task definition. The second method is to create a new Service that uses the same Loadbalancer. The third method is to create both a new Loadbalancer and a new Service.\n\n\nThere are advantages and disadvantages to each of these methods. The following sections discuss the advantages and disadvantages of using each method, and include procedures for implementing each method.\n\n\nMethod 1: Refer to a new task definition\n#\n\n\nThis method of updating a Layer0 application is the easiest to implement, because you do not need to rescale the Service or modify the Loadbalancer. 
This method is completely transparent to all other components of the application, and using this method does not involve any downtime.\n\n\nThe disadvantage of using this method is that you cannot perform A/B testing of the old and new services, and you cannot control which traffic goes to the old service and which goes to the new one.\n\n\nTo replace a Deploy to refer to a new task definition:\n\n\nAt the command line, type the following to create a new Deploy:\n\n\nl0 deploy create taskDefPath deployName\n\n\n\n\n\ntaskDefPath\n is the path to the ECS Task Definition. Note that if \ndeployName\n already exists, this step will create a new version of that Deploy.\n\n\nUse \nl0 service update\n to update the existing service:\n\n\nl0 service update serviceName deployName[:deployVersion]\n\n\n\n\n\nBy default, the service name you specify in this command will refer to the latest version of \ndeployName\n. You can optionally specify a specific version of the deploy, as shown above.\n\n\nMethod 2: Create a new Deploy and Service using the same Loadbalancer\n#\n\n\nThis method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. This method also you allows you to re-scale the service if necessary, using the \nl0 service scale\n command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates.\n\n\nThe disadvantage of using this method is that you cannot control the routing of traffic between the old and new versions of the application.\n\n\nTo create a new Deploy and Service:\n\n\nAt the command line, type the following to create a new deploy or a new version of a deploy:\n\n\nl0 deploy create taskDefPath deployName\n\n\n\n\n\ntaskDefPath\n is the path to the ECS Task Definition. Note that if \ndeployName\n already exists, this step will create a new version of that Deploy.\n\n\nUse \nl0 service create\n to create a new service that uses \ndeployName\n behind an existing load balancer named \nloadBalancerName\n\n\nl0 service create --loadbalancer [environmentName:]loadBalancerName environmentName serviceName deployName[:deployVersion]\n\n\n\n\n\nBy default, the service name you specify in this command will refer to the latest version of \ndeployName\n. You can optionally specify a specific version of the deploy, as shown above. You can also optionally specify the name of the environment, \nenvironmentName\n where the load balancer exists. \n\n\nCheck to make sure that the new service is working as expected. If it is, and you do not want to keep the old service, delete the old service: \n\n\nl0 service delete service\n\n\n\n\n\nMethod 3: Create a new Deploy, Loadbalancer and Service\n#\n\n\nThe final method of updating a Layer0 service is to create an entirely new Deploy, Load Balancer and Service. This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services.\n\n\nThe disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Load Balancer.\n\n\nTo create a new Deploy, Load Balancer and Service:\n\n\nType the following command to create a new Deploy:\n\n\nl0 deploy create taskDefPath deployName\n\n\n\n\n\ntaskDefPath\n is the path to the ECS Task Definition. 
Note that if \ndeployName\n already exists, this step will create a new version of that Deploy.\n\n\nUse \nl0 loadbalancer create\n to create a new Load Balancer:\n\n\nl0 loadbalancer create --port port environmentName loadBalancerName deployName\n\n\n\n\n\n\n\nport\n is the port configuration for the listener of the Load Balancer. Valid pattern is \nhostPort:containerPort/protocol\n. Multiple ports can be specified using \n--port port1 --port port2 ...\n.\n\n\nhostPort\n - The port that the load balancer will listen for traffic on.\n\n\ncontainerPort\n - The port that the load balancer will forward traffic to.\n\n\nprotocol\n - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS).\n\n\n\n\n\n\n\n\n\n\nNote\n\n\nThe value of \nloadbalancerName\n in the above command must be unique to the Environment.\n\n\n\n\nUse \nl0 service create\n to create a new Service using the Load Balancer you just created: \n\n\nl0 service create --loadbalancer loadBalancerName environmentName serviceName deployName\n\n\n\n\n\n\n\nNote\n\n\nThe value of \nserviceName\n in the above command must be unique to the Environment.\n\n\n\n\nImplement a method of routing traffic between the old and new Services, such as \nHAProxy\n or \nConsul\n.", + "title": "Updating a Service" + }, + { + "location": "/reference/updateservice/#updating-a-layer0-service", + "text": "There are three methods of updating an existing Layer0 service. The first method is to update the existing Deploy to refer to a new Docker task definition. The second method is to create a new Service that uses the same Loadbalancer. The third method is to create both a new Loadbalancer and a new Service. There are advantages and disadvantages to each of these methods. The following sections discuss the advantages and disadvantages of using each method, and include procedures for implementing each method.", + "title": "Updating a Layer0 service" + }, + { + "location": "/reference/updateservice/#method-1-refer-to-a-new-task-definition", + "text": "This method of updating a Layer0 application is the easiest to implement, because you do not need to rescale the Service or modify the Loadbalancer. This method is completely transparent to all other components of the application, and using this method does not involve any downtime. The disadvantage of using this method is that you cannot perform A/B testing of the old and new services, and you cannot control which traffic goes to the old service and which goes to the new one. To replace a Deploy to refer to a new task definition: At the command line, type the following to create a new Deploy: l0 deploy create taskDefPath deployName taskDefPath is the path to the ECS Task Definition. Note that if deployName already exists, this step will create a new version of that Deploy. Use l0 service update to update the existing service: l0 service update serviceName deployName[:deployVersion] By default, the service name you specify in this command will refer to the latest version of deployName . You can optionally specify a specific version of the deploy, as shown above.", + "title": "Method 1: Refer to a new task definition" + }, + { + "location": "/reference/updateservice/#method-2-create-a-new-deploy-and-service-using-the-same-loadbalancer", + "text": "This method of updating a Layer0 application is also rather easy to implement. Like the method described in the previous section, this method is completely transparent to all other services and components of the application. 
This method also you allows you to re-scale the service if necessary, using the l0 service scale command. Finally, this method allows for indirect A/B testing of the application; you can change the scale of the application, and observe the success and failure rates. The disadvantage of using this method is that you cannot control the routing of traffic between the old and new versions of the application. To create a new Deploy and Service: At the command line, type the following to create a new deploy or a new version of a deploy: l0 deploy create taskDefPath deployName taskDefPath is the path to the ECS Task Definition. Note that if deployName already exists, this step will create a new version of that Deploy. Use l0 service create to create a new service that uses deployName behind an existing load balancer named loadBalancerName l0 service create --loadbalancer [environmentName:]loadBalancerName environmentName serviceName deployName[:deployVersion] By default, the service name you specify in this command will refer to the latest version of deployName . You can optionally specify a specific version of the deploy, as shown above. You can also optionally specify the name of the environment, environmentName where the load balancer exists. Check to make sure that the new service is working as expected. If it is, and you do not want to keep the old service, delete the old service: l0 service delete service", + "title": "Method 2: Create a new Deploy and Service using the same Loadbalancer" + }, + { + "location": "/reference/updateservice/#method-3-create-a-new-deploy-loadbalancer-and-service", + "text": "The final method of updating a Layer0 service is to create an entirely new Deploy, Load Balancer and Service. This method gives you complete control over both the new and the old Service, and allows you to perform true A/B testing by routing traffic to individual Services. The disadvantage of using this method is that you need to implement a method of routing traffic between the new and the old Load Balancer. To create a new Deploy, Load Balancer and Service: Type the following command to create a new Deploy: l0 deploy create taskDefPath deployName taskDefPath is the path to the ECS Task Definition. Note that if deployName already exists, this step will create a new version of that Deploy. Use l0 loadbalancer create to create a new Load Balancer: l0 loadbalancer create --port port environmentName loadBalancerName deployName port is the port configuration for the listener of the Load Balancer. Valid pattern is hostPort:containerPort/protocol . Multiple ports can be specified using --port port1 --port port2 ... . hostPort - The port that the load balancer will listen for traffic on. containerPort - The port that the load balancer will forward traffic to. protocol - The protocol to use when forwarding traffic (acceptable values: TCP, SSL, HTTP, and HTTPS). Note The value of loadbalancerName in the above command must be unique to the Environment. Use l0 service create to create a new Service using the Load Balancer you just created: l0 service create --loadbalancer loadBalancerName environmentName serviceName deployName Note The value of serviceName in the above command must be unique to the Environment. 
Implement a method of routing traffic between the old and new Services, such as HAProxy or Consul .", + "title": "Method 3: Create a new Deploy, Loadbalancer and Service" + }, + { + "location": "/reference/consul/", + "text": "Consul reference\n#\n\n\nConsul\n is an open-source tool for discovering and configuring services in your network architecture. Specifically, Consul provides the following features:\n\n\n\n\nDiscovery of services\n\n\nMonitoring of the health of services\n\n\nKey/value storage with a simple HTTP API\n\n\n\n\nConsul Agent\n#\n\n\nThe \nConsul Agent\n exposes a DNS API for easy consumption of data generated by \nRegistrator\n. The Consul Agent can run either in server or client mode.\n\n\nWhen run as a Layer0 Service, the Consul Agent runs in server mode. To ensure the integrity of your data, the service in which you run consul should be scaled to size 3 or greater. A group of several consul deployments is known as a \"\ncluster\n.\"\n\n\nOther Layer0 Services that use Consul will run the Consul Agent in client mode, alongside their application containers.\nThe client is a very lightweight process that registers services, runs health checks, and forwards queries to servers.\n\n\nRegistrator\n#\n\n\nRegistrator\n is a tool that automatically registers and deregisters services into a Consul Cluster by inspecting Docker containers as they come online.\nContainer registration is based off of environment variables on the container.\nLayer0 Services that use Consul will run Registrator alongside their application containers.\n\n\nService Configuration\n#\n\n\nLayer0 Services that use Consul will need to add the \nRegistrator\n and \nConsul Agent\n definitions to the\n\ncontainerDefinitions\n section of your Deploys. You must also add the \nDocker Socket\n definition to the \nvolumes\n section of your Deploys.\n\n\n\n\nRegistrator Container Definition\n#\n\n\n{\n \nname\n: \nregistrator\n,\n \nimage\n: \ngliderlabs/registrator:master\n,\n \nessential\n: true,\n \nlinks\n: [\nconsul-agent\n],\n \nentrypoint\n: [\n/bin/sh\n, \n-c\n],\n \ncommand\n: [\n/bin/registrator -retry-attempts=-1 -retry-interval=30000 -ip $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) consul://consul-agent:8500\n],\n \nmemory\n: 128,\n \nmountPoints\n: [\n {\n \nsourceVolume\n: \ndockersocket\n,\n \ncontainerPath\n: \n/tmp/docker.sock\n\n }\n ]\n},\n\n\n\n\n\n\n\nConsul Agent Container Definition\n#\n\n\n\n\nWarning\n\n\n\n\nYou must replace \nurl\n with your Layer0 Consul Load Balancer's\n\n\n{\n \nname\n: \nconsul-agent\n,\n \nimage\n: \nprogrium/consul\n,\n \nessential\n: true,\n \nentrypoint\n: [\n/bin/bash\n, \n-c\n],\n \ncommand\n: [\n/bin/start -advertise $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) -retry-join $EXTERNAL_URL -recursor $UPSTREAM_DNS -retry-interval 30s\n],\n \nmemory\n: 128,\n \nportMappings\n: [\n {\n \nhostPort\n: 8500,\n \ncontainerPort\n: 8500\n },\n {\n \nhostPort\n: 53,\n \ncontainerPort\n: 53,\n \nprotocol\n: \nudp\n\n }\n ],\n \nenvironment\n: [\n {\n \nname\n: \nEXTERNAL_URL\n,\n \nvalue\n: \nurl\n\n },\n {\n \nname\n: \nUPSTREAM_DNS\n,\n \nvalue\n: \n10.100.0.2\n\n }\n ]\n},\n\n\n\n\n\nEnvironment Variables\n#\n\n\n\n\nEXTERNAL_URL\n - URL of the consul cluster\n\n\nUPSTREAM_DNS\n - The DNS server consul-agent queries for DNS entries that it cannot resolve internally (e.g. google.com)\n\n\nThe default value for \nUPSTREAM_DNS\n assumes you're using the default Layer0 configuration, making your internal DNS endpoint 10.100.0.2. 
If you are a using a non standard configuration (e.g. installing Layer0 in an existing VPC with a CIDR other than \n10.100.0.0/16\n) please modify this variable accordingly.\n\n\n\n\n\n\n\n\n\n\nDocker Socket Volume Definition\n#\n\n\nvolumes\n: [\n {\n \nname\n: \ndockersocket\n,\n \nhost\n: {\n \nsourcePath\n: \n/var/run/docker.sock\n\n }\n }\n],", + "title": "Consul" + }, + { + "location": "/reference/consul/#consul-reference", + "text": "Consul is an open-source tool for discovering and configuring services in your network architecture. Specifically, Consul provides the following features: Discovery of services Monitoring of the health of services Key/value storage with a simple HTTP API", + "title": "Consul reference" + }, + { + "location": "/reference/consul/#consul-agent", + "text": "The Consul Agent exposes a DNS API for easy consumption of data generated by Registrator . The Consul Agent can run either in server or client mode. When run as a Layer0 Service, the Consul Agent runs in server mode. To ensure the integrity of your data, the service in which you run consul should be scaled to size 3 or greater. A group of several consul deployments is known as a \" cluster .\" Other Layer0 Services that use Consul will run the Consul Agent in client mode, alongside their application containers.\nThe client is a very lightweight process that registers services, runs health checks, and forwards queries to servers.", + "title": "Consul Agent" + }, + { + "location": "/reference/consul/#registrator", + "text": "Registrator is a tool that automatically registers and deregisters services into a Consul Cluster by inspecting Docker containers as they come online.\nContainer registration is based off of environment variables on the container.\nLayer0 Services that use Consul will run Registrator alongside their application containers.", + "title": "Registrator" + }, + { + "location": "/reference/consul/#service-configuration", + "text": "Layer0 Services that use Consul will need to add the Registrator and Consul Agent definitions to the containerDefinitions section of your Deploys. 
You must also add the Docker Socket definition to the volumes section of your Deploys.", + "title": "Service Configuration" + }, + { + "location": "/reference/consul/#registrator-container-definition", + "text": "{\n name : registrator ,\n image : gliderlabs/registrator:master ,\n essential : true,\n links : [ consul-agent ],\n entrypoint : [ /bin/sh , -c ],\n command : [ /bin/registrator -retry-attempts=-1 -retry-interval=30000 -ip $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) consul://consul-agent:8500 ],\n memory : 128,\n mountPoints : [\n {\n sourceVolume : dockersocket ,\n containerPath : /tmp/docker.sock \n }\n ]\n},", + "title": "Registrator Container Definition" + }, + { + "location": "/reference/consul/#consul-agent-container-definition", + "text": "Warning You must replace url with your Layer0 Consul Load Balancer's {\n name : consul-agent ,\n image : progrium/consul ,\n essential : true,\n entrypoint : [ /bin/bash , -c ],\n command : [ /bin/start -advertise $(wget http://169.254.169.254/latest/meta-data/local-ipv4 -q -O -) -retry-join $EXTERNAL_URL -recursor $UPSTREAM_DNS -retry-interval 30s ],\n memory : 128,\n portMappings : [\n {\n hostPort : 8500,\n containerPort : 8500\n },\n {\n hostPort : 53,\n containerPort : 53,\n protocol : udp \n }\n ],\n environment : [\n {\n name : EXTERNAL_URL ,\n value : url \n },\n {\n name : UPSTREAM_DNS ,\n value : 10.100.0.2 \n }\n ]\n},", + "title": "Consul Agent Container Definition" + }, + { + "location": "/reference/consul/#environment-variables", + "text": "EXTERNAL_URL - URL of the consul cluster UPSTREAM_DNS - The DNS server consul-agent queries for DNS entries that it cannot resolve internally (e.g. google.com) The default value for UPSTREAM_DNS assumes you're using the default Layer0 configuration, making your internal DNS endpoint 10.100.0.2. If you are a using a non standard configuration (e.g. 
installing Layer0 in an existing VPC with a CIDR other than 10.100.0.0/16 ) please modify this variable accordingly.", + "title": "Environment Variables" + }, + { + "location": "/reference/consul/#docker-socket-volume-definition", + "text": "volumes : [\n {\n name : dockersocket ,\n host : {\n sourcePath : /var/run/docker.sock \n }\n }\n],", + "title": "Docker Socket Volume Definition" + }, + { + "location": "/reference/task_definition/", + "text": "Task Definitions\n#\n\n\nThis guide gives some overview into the composition of a task definition.\nFor more comprehensive documentation, we recommend taking a look at the official AWS docs:\n\n\n\n\nCreating a Task Definition\n\n\nTask Definition Parameters\n\n\n\n\nSample\n#\n\n\nThe following snippet contains the task definition for the \nGuestbook\n application\n\n\n{\n \nAWSEBDockerrunVersion\n: 2,\n \ncontainerDefinitions\n: [\n {\n \nname\n: \nguestbook\n,\n \nimage\n: \nquintilesims/guestbook\n,\n \nessential\n: true,\n \nmemory\n: 128,\n \nportMappings\n: [\n {\n \nhostPort\n: 80,\n \ncontainerPort\n: 80\n }\n ],\n }\n ]\n}\n\n\n\n\n\n\n\nName\n The name of the container\n\n\n\n\n\n\nWarning\n\n\n\n\nIf you wish to update your task definition, the container names \nmust\n remain the same.\nIf any container names are changed or removed in an updated task definition,\nECS will not know how the existing container(s) should be mapped over and you will not be able to deploy the updated task definition.\nIf you encounter a scenario where you must change or remove a container's name in a task definition, we recommend re-creating the Layer0 Deploy and Service.\n\n\n\n\nImage\n The Docker image used to build the container. The image format is \nurl/image:tag\n\n\nThe \nurl\n specifies which Docker Repo to pull the image from\n If a non-Docker-Hub \nurl\n is not specified, \nDocker Hub\n is used (as is the case here)\n\n\nThe \nimage\n specifies the name of the image to grab (in this case, the \nguestbook\n image from the \nquintilesims\n Docker Hub group)\n\n\nThe \ntag\n specifies which version of image to grab\nIf \ntag\n is not specified, \n:latest\n is used\n\n\n\n\n\n\nEssential\n If set to \ntrue\n, all other containers in the task definition will be stopped if that container fails or stops for any reason.\nOtherwise, the container's failure will not affect the rest of the containers in the task definition.\n\n\nMemory\n The number of MiB of memory to reserve for the container.\nIf your container attempts to exceed the memory allocated here, the container is killed\n\n\nPortMappings\n A list of hostPort, containerPort mappings for the container\n\n\nHostPort\n The port number on the host instance reserved for your container.\nIf your Layer0 Service is behind a Layer0 Load Balancer, this should map to an \ninstancePort\n on the Layer0 Load Balancer.\n\n\nContainerPort\n The port number the container should receive traffic on.\nAny traffic received from the instance's \nhostPort\n will be forwarded to the container on this port", + "title": "Task Definitions" + }, + { + "location": "/reference/task_definition/#task-definitions", + "text": "This guide gives some overview into the composition of a task definition.\nFor more comprehensive documentation, we recommend taking a look at the official AWS docs: Creating a Task Definition Task Definition Parameters", + "title": "Task Definitions" + }, + { + "location": "/reference/task_definition/#sample", + "text": "The following snippet contains the task definition for the Guestbook application {\n 
AWSEBDockerrunVersion : 2,\n containerDefinitions : [\n {\n name : guestbook ,\n image : quintilesims/guestbook ,\n essential : true,\n memory : 128,\n portMappings : [\n {\n hostPort : 80,\n containerPort : 80\n }\n ],\n }\n ]\n} Name The name of the container Warning If you wish to update your task definition, the container names must remain the same.\nIf any container names are changed or removed in an updated task definition,\nECS will not know how the existing container(s) should be mapped over and you will not be able to deploy the updated task definition.\nIf you encounter a scenario where you must change or remove a container's name in a task definition, we recommend re-creating the Layer0 Deploy and Service. Image The Docker image used to build the container. The image format is url/image:tag The url specifies which Docker Repo to pull the image from\n If a non-Docker-Hub url is not specified, Docker Hub is used (as is the case here) The image specifies the name of the image to grab (in this case, the guestbook image from the quintilesims Docker Hub group) The tag specifies which version of image to grab\nIf tag is not specified, :latest is used Essential If set to true , all other containers in the task definition will be stopped if that container fails or stops for any reason.\nOtherwise, the container's failure will not affect the rest of the containers in the task definition. Memory The number of MiB of memory to reserve for the container.\nIf your container attempts to exceed the memory allocated here, the container is killed PortMappings A list of hostPort, containerPort mappings for the container HostPort The port number on the host instance reserved for your container.\nIf your Layer0 Service is behind a Layer0 Load Balancer, this should map to an instancePort on the Layer0 Load Balancer. ContainerPort The port number the container should receive traffic on.\nAny traffic received from the instance's hostPort will be forwarded to the container on this port", + "title": "Sample" + }, + { + "location": "/reference/architecture/", + "text": "Layer0 Architecture\n#\n\n\nLayer0 is built on top of the following primary technologies:\n\n\n\n\nApplication Container: \nDocker\n\n\nCloud Provider: \nAmazon Web Services\n\n\nContainer Management: \nAmazon EC2 Container Service (ECS)\n\n\nLoad Balancing: \nAmazon Elastic Load Balancing\n\n\nInfrastructure Configuration: Hashicorp \nTerraform\n\n\nIdentity Management: \nAuth0", + "title": "Architecture" + }, + { + "location": "/reference/architecture/#layer0-architecture", + "text": "Layer0 is built on top of the following primary technologies: Application Container: Docker Cloud Provider: Amazon Web Services Container Management: Amazon EC2 Container Service (ECS) Load Balancing: Amazon Elastic Load Balancing Infrastructure Configuration: Hashicorp Terraform Identity Management: Auth0", + "title": "Layer0 Architecture" + }, + { + "location": "/reference/ecr/", + "text": "EC2 Container Registry\n#\n\n\nECR is an Amazon implementation of a docker registry. It acts as a private registry in your AWS account, which can be accessed from any docker client, and Layer0. 
Consider using ECR if you have stability issues with hosted docker registries, and do not wish to share your images publicly on \ndockerhub\n.\n\n\nSetup\n#\n\n\nWhen interacting with ECR, you will first need to create a repository and a login to interact from your development machine.\n\n\nRepository\n#\n\n\nEach repository needs to be created by an AWS api call.\n\n\n \n aws ecr create-repository --repository-name myteam/myproject\n\n\n\n\n\nLogin\n#\n\n\nTo authenticate with the ECR service, Amazon provides the \nget-login\n command, which generates an authentication token, and returns a docker command to set it up\n\n\n \n aws ecr get-login\n # this command will return the following: (password is typically hundreds of characters)\n docker login -u AWS -p password -e none https://aws_account_id.dkr.ecr.us-east-1.amazonaws.com\n\n\n\n\n\nExecute the provided docker command to store the login credentials\n\n\nAfterward creating the repository and local login credentials you may interact with images (and tags) under this path from a local docker client.\n\n\n docker pull \n${\necr\n-\nurl\n}\n/myteam/myproject\n docker push \n${\necr\n-\nurl\n}\n/myteam/myproject:custom-tag-1\n\n\n\n\n\nDeploy Example\n#\n\n\nHere we'll walk through using ECR when deploying to Layer0, Using a very basic wait container.\n\n\nMake docker image\n#\n\n\nYour docker image can be built locally or pulled from dockerhub. For this example, we made a service that waits and then exits (useful for triggering regular restarts).\n\n\nFROM busybox\n\nENV SLEEP_TIME=60\n\nCMD sleep $SLEEP_TIME\n\n\n\n\n\nThen build the file, with the tag \nxfra/wait\n\n\n \n docker build -f Dockerfile.wait -t xfra/wait .\n\n\n\n\n\nUpload to ECR\n#\n\n\nAfter preparing a login and registry, tag the image with the remote url, and use \ndocker push\n\n\n docker tag xfra/wait 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait\n docker push 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait\n\n\n\n\n\n\n\nNote: your account id in this url will be different.\n\n\n\n\nCreate a deploy\n#\n\n\nTo run this image in Layer0, we create a dockerrun file, describing the instance and any additional variables\n\n\n{\n \ncontainerDefinitions\n: [\n {\n \nname\n: \ntimeout\n,\n \nimage\n: \n111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait:latest\n,\n \nessential\n: true,\n \nmemory\n: 10,\n \nenvironment\n: [\n { \nname\n: \nSLEEP_TIME\n, \nvalue\n: \n43200\n }\n ]\n }\n ]\n}\n\n\n\n\n\nAnd create that in Layer0\n\n\n l0 deploy create timeout.dockerrun.aws.json timeout\n\n\n\n\n\nDeploy\n#\n\n\nFinally, run that deploy as a service or a task. (the service will restart every 12 hours)\n\n\n l0 service create demo timeoutsvc timeout:latest\n\n\n\n\n\nReferences\n#\n\n\n\n\nECR User Guide\n\n\ncreate-repository\n\n\nget-login", + "title": "ECR" + }, + { + "location": "/reference/ecr/#ec2-container-registry", + "text": "ECR is an Amazon implementation of a docker registry. It acts as a private registry in your AWS account, which can be accessed from any docker client, and Layer0. 
Consider using ECR if you have stability issues with hosted docker registries, and do not wish to share your images publicly on dockerhub .", + "title": "EC2 Container Registry" + }, + { + "location": "/reference/ecr/#setup", + "text": "When interacting with ECR, you will first need to create a repository and a login to interact from your development machine.", + "title": "Setup" + }, + { + "location": "/reference/ecr/#repository", + "text": "Each repository needs to be created by an AWS api call. aws ecr create-repository --repository-name myteam/myproject", + "title": "Repository" + }, + { + "location": "/reference/ecr/#login", + "text": "To authenticate with the ECR service, Amazon provides the get-login command, which generates an authentication token, and returns a docker command to set it up aws ecr get-login\n # this command will return the following: (password is typically hundreds of characters)\n docker login -u AWS -p password -e none https://aws_account_id.dkr.ecr.us-east-1.amazonaws.com Execute the provided docker command to store the login credentials Afterward creating the repository and local login credentials you may interact with images (and tags) under this path from a local docker client. docker pull ${ ecr - url } /myteam/myproject\n docker push ${ ecr - url } /myteam/myproject:custom-tag-1", + "title": "Login" + }, + { + "location": "/reference/ecr/#deploy-example", + "text": "Here we'll walk through using ECR when deploying to Layer0, Using a very basic wait container.", + "title": "Deploy Example" + }, + { + "location": "/reference/ecr/#make-docker-image", + "text": "Your docker image can be built locally or pulled from dockerhub. For this example, we made a service that waits and then exits (useful for triggering regular restarts). FROM busybox\n\nENV SLEEP_TIME=60\n\nCMD sleep $SLEEP_TIME Then build the file, with the tag xfra/wait docker build -f Dockerfile.wait -t xfra/wait .", + "title": "Make docker image" + }, + { + "location": "/reference/ecr/#upload-to-ecr", + "text": "After preparing a login and registry, tag the image with the remote url, and use docker push docker tag xfra/wait 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait\n docker push 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait Note: your account id in this url will be different.", + "title": "Upload to ECR" + }, + { + "location": "/reference/ecr/#create-a-deploy", + "text": "To run this image in Layer0, we create a dockerrun file, describing the instance and any additional variables {\n containerDefinitions : [\n {\n name : timeout ,\n image : 111222333444.dkr.ecr.us-east-1.amazonaws.com/xfra-wait:latest ,\n essential : true,\n memory : 10,\n environment : [\n { name : SLEEP_TIME , value : 43200 }\n ]\n }\n ]\n} And create that in Layer0 l0 deploy create timeout.dockerrun.aws.json timeout", + "title": "Create a deploy" + }, + { + "location": "/reference/ecr/#deploy", + "text": "Finally, run that deploy as a service or a task. 
(the service will restart every 12 hours) l0 service create demo timeoutsvc timeout:latest", + "title": "Deploy" + }, + { + "location": "/reference/ecr/#references", + "text": "ECR User Guide create-repository get-login", + "title": "References" + }, + { + "location": "/troubleshooting/commonissues/", + "text": "Common issues and their solutions\n#\n\n\n\"Connection refused\" error when executing Layer0 commands\n#\n\n\nWhen executing commands using the Layer0 CLI, you may see the following error message: \n\n\nGet http://localhost:9090/command/: dial tcp 127.0.0.1:9090: connection refused\n\n\nWhere \ncommand\n is the Layer0 command you are trying to execute.\n\n\nThis error indicates that your Layer0 environment variables have not been set for the current session. See the \n\"Connect to a Layer0 Instance\" section\n of the Layer0 installation guide for instructions for setting up your environment variables.\n\n\n\n\n\"Invalid Dockerrun.aws.json\" error when creating a deploy\n#\n\n\nByte Order Marks (BOM) in Dockerrun file\n#\n\n\nIf your Dockerrun.aws.json file contains a Byte Order Marker, you may receive an \"Invalid Dockerrun.aws.json\" error when creating a deploy. If you create or edit the Dockerrun file using Visual Studio, and you have not modified the file encoding settings in Visual Studio, you are likely to encounter this error.\n\n\nTo remove the BOM:\n\n\n\n\n\n\nAt the command line, type the following to remove the BOM:\n\n\n\n\n(Linux/OS X) \n\n\n\n\ntail -c +4 DockerrunFile \n DockerrunFileNew\n\n\nReplace \nDockerrunFile\n with the path to your Dockerrun file, and \nDockerrunFileNew\n with a new name for the Dockerrun file without the BOM.\n\n\n\n\n\n\nAlternatively, you can use the \ndos2unix file converter\n to remove the BOM from your Dockerrun files. Dos2unix is available for Windows, Linux and Mac OS.\n\n\nTo remove the BOM using dos2unix:\n\n\n\n\nAt the command line, type the following:\n\n\n\n\ndos2unix --remove-bom -n DockerrunFile DockerrunFileNew\n\n\n\n\n\nReplace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.\n\n\n\n\n\"AWS Error: the key pair '\n' does not exist (code 'ValidationError')\" with l0-setup\n#\n\n\nThis occurs when you pass an invalid EC2 keypair to l0-setup. To fix this, follow the instructions for \ncreating an EC2 Key Pair\n.\n\n\n\n\nAfter you've created a new EC2 Key Pair, use \nl0-setup init\n to reconfigure your instance:\n\n\n\n\nl0-setup init --aws-ssh-key-pair keypair", + "title": "Common Issues" + }, + { + "location": "/troubleshooting/commonissues/#common-issues-and-their-solutions", + "text": "", + "title": "Common issues and their solutions" + }, + { + "location": "/troubleshooting/commonissues/#connection-refused-error-when-executing-layer0-commands", + "text": "When executing commands using the Layer0 CLI, you may see the following error message: Get http://localhost:9090/command/: dial tcp 127.0.0.1:9090: connection refused Where command is the Layer0 command you are trying to execute. This error indicates that your Layer0 environment variables have not been set for the current session. 
See the \"Connect to a Layer0 Instance\" section of the Layer0 installation guide for instructions for setting up your environment variables.", + "title": "\"Connection refused\" error when executing Layer0 commands" + }, + { + "location": "/troubleshooting/commonissues/#invalid-dockerrunawsjson-error-when-creating-a-deploy", + "text": "", + "title": "\"Invalid Dockerrun.aws.json\" error when creating a deploy" + }, + { + "location": "/troubleshooting/commonissues/#byte-order-marks-bom-in-dockerrun-file", + "text": "If your Dockerrun.aws.json file contains a Byte Order Marker, you may receive an \"Invalid Dockerrun.aws.json\" error when creating a deploy. If you create or edit the Dockerrun file using Visual Studio, and you have not modified the file encoding settings in Visual Studio, you are likely to encounter this error. To remove the BOM: At the command line, type the following to remove the BOM: (Linux/OS X) tail -c +4 DockerrunFile DockerrunFileNew Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM. Alternatively, you can use the dos2unix file converter to remove the BOM from your Dockerrun files. Dos2unix is available for Windows, Linux and Mac OS. To remove the BOM using dos2unix: At the command line, type the following: dos2unix --remove-bom -n DockerrunFile DockerrunFileNew Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.", + "title": "Byte Order Marks (BOM) in Dockerrun file" + }, + { + "location": "/troubleshooting/commonissues/#aws-error-the-key-pair-does-not-exist-code-validationerror-with-l0-setup", + "text": "This occurs when you pass an invalid EC2 keypair to l0-setup. To fix this, follow the instructions for creating an EC2 Key Pair . After you've created a new EC2 Key Pair, use l0-setup init to reconfigure your instance: l0-setup init --aws-ssh-key-pair keypair", + "title": "\"AWS Error: the key pair '' does not exist (code 'ValidationError')\" with l0-setup" + }, + { + "location": "/troubleshooting/ssh/", + "text": "Secure Shell (SSH)\n#\n\n\nYou can use Secure Shell (SSH) to access your Layer0 environment(s).\n\n\nBy default, Layer0 Setup asks for an EC2 key pair when creating a new Layer0. This key pair is associated with all machines that host your Layer0 Services. This means you can use SSH to log into the underlying Docker host to perform tasks such as troubleshooting failing containers or viewing logs. 
For information about creating an EC2 key pair, see \nInstall and Configure Layer0\n.\n\n\n\n\nWarning\n\n\nThis section is recommended for development debugging only.\nIt is \nnot\n recommended for production environments.\n\n\n\n\nTo SSH into a Service\n#\n\n\n\n\nIn a console window, add port 2222:22/tcp to your Service's load balancer:\n\n\n\n\nl0 loadbalancer addport \nname\n 2222:22/tcp\n\n\n\n\n\n\n \nSSH into your Service by supplying the load balancer url and key pair file name.\n\n\n\n\n\nssh -i \nkey pair path and file name\n ec2-user@\nload balancer url\n -p 2222\n\n\n\n\n\n\n \nIf required, Use Docker to access a specific container with Bash.\n\n\n\n\n\ndocker exec -it \ncontainer id\n /bin/bash\n\n\n\n\n\nRemarks\n#\n\n\nYou can get the load balancer url from the Load Balancers section of your Layer0 AWS console.\n\n\nUse the \nl0 loadbalancer dropport\n subcommand to remove a port configuration from an existing Layer0 load balancer.\n\n\nYou \ncannot\n change the key pair after a Layer0 has been created. If you lose your key pair or need to generate a new one, you will need to create a new Layer0.\n\n\nIf your Service is behind a private load balancer, or none at all, you can either re-create your Service behind a public load balancer, use an existing public load balancer as a \"jump\" point, or create a new Layer0 Service behind a public load balancer to serve as a \"jump\" point.", + "title": "Secure Shell (SSH)" + }, + { + "location": "/troubleshooting/ssh/#secure-shell-ssh", + "text": "You can use Secure Shell (SSH) to access your Layer0 environment(s). By default, Layer0 Setup asks for an EC2 key pair when creating a new Layer0. This key pair is associated with all machines that host your Layer0 Services. This means you can use SSH to log into the underlying Docker host to perform tasks such as troubleshooting failing containers or viewing logs. For information about creating an EC2 key pair, see Install and Configure Layer0 . Warning This section is recommended for development debugging only.\nIt is not recommended for production environments.", + "title": "Secure Shell (SSH)" + }, + { + "location": "/troubleshooting/ssh/#to-ssh-into-a-service", + "text": "In a console window, add port 2222:22/tcp to your Service's load balancer: l0 loadbalancer addport name 2222:22/tcp \n SSH into your Service by supplying the load balancer url and key pair file name. ssh -i key pair path and file name ec2-user@ load balancer url -p 2222 \n If required, Use Docker to access a specific container with Bash. docker exec -it container id /bin/bash", + "title": "To SSH into a Service" + }, + { + "location": "/troubleshooting/ssh/#remarks", + "text": "You can get the load balancer url from the Load Balancers section of your Layer0 AWS console. Use the l0 loadbalancer dropport subcommand to remove a port configuration from an existing Layer0 load balancer. You cannot change the key pair after a Layer0 has been created. If you lose your key pair or need to generate a new one, you will need to create a new Layer0. 
If your Service is behind a private load balancer, or none at all, you can either re-create your Service behind a public load balancer, use an existing public load balancer as a \"jump\" point, or create a new Layer0 Service behind a public load balancer to serve as a \"jump\" point.", + "title": "Remarks" + } + ] +} \ No newline at end of file diff --git a/docs/setup/destroy/index.html b/docs/setup/destroy/index.html index 8f687f105..fd73696cd 100644 --- a/docs/setup/destroy/index.html +++ b/docs/setup/destroy/index.html @@ -1,16 +1,16 @@ + + + + - - - - - + - - + + + - Destroy - Layer0 - + @@ -18,537 +18,746 @@ + + + + + + + + + + + + + + + + + - - - - - - - + + Destroy - Layer0 + - - - - + + + + - + + + + - - - + - - - - - - - - -
    -
    -
    - - - -
    -
    + +
    + + + +
    +
    + + +
    +
    +
    + - - -
      + +
    + +
    +
    +
    - - -
    -
    - +
    +
    +
    + + -
    -
    -
    +
    +
    +
    + -

    Destroying a Layer0 Instance#

    +
    +
    + + + + + +

    Destroying a Layer0 Instance#

    This section provides procedures for destroying (deleting) a Layer0 instance.

    Part 1: Clean Up Your Layer0 Environments#

    In order to destroy a Layer0 instance, you must first delete all environments in the instance. List all environments with:

    -
    $ l0 environment list
    +
    l0 environment list
    +

    For each environment listed in the previous step, with the exception of the environment named api, issue the following command (replacing <environment_name> with the name of the environment to delete):

    -
    l0 environment delete --wait <environment_name>
    +
    l0 environment delete --wait <environment_name>
    +

    Part 2: Destroy the Layer0 Instance#

    Once all environments have been deleted, the Layer0 instance can be deleted using the l0-setup tool. Run the following command (replacing <instance_name> with the name of the Layer0 instance):

    -
    $ l0-setup destroy <instance_name>
    - - -

    The destroy command is idempotent; if it fails, it is safe to re-attempt multiple times. -If the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources l0-setup is attempting to destroy. -You will need to manually remove these dependencies in order to get the destroy command to complete successfully.

    - - -
    +
    l0-setup destroy <instance_name>
    +
    + + +

    The destroy command is idempotent; if it fails, it is safe to re-attempt multiple times.

    +
    +

    Note

    +

    If the operation continues to fail, it is likely there are resources that were created outside of Layer0 that have dependencies on the resources l0-setup is attempting to destroy. You will need to manually remove these dependencies in order to get the destroy command to complete successfully.
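If you hit this case, one way to find the blockers is to ask AWS what is still attached to the instance's VPC. The commands below are a minimal sketch, not part of the official tooling: they assume the AWS CLI is configured for the same account, and vpc-0abc123 stands in for the VPC that l0-setup created for your instance.

# List network interfaces, security groups, and classic load balancers still tied to the VPC.
aws ec2 describe-network-interfaces --filters Name=vpc-id,Values=vpc-0abc123 --query 'NetworkInterfaces[].NetworkInterfaceId'
aws ec2 describe-security-groups --filters Name=vpc-id,Values=vpc-0abc123 --query 'SecurityGroups[].{Id:GroupId,Name:GroupName}'
aws elb describe-load-balancers --query 'LoadBalancerDescriptions[?VPCId==`vpc-0abc123`].LoadBalancerName'
# Delete or detach anything reported here that Layer0 did not create, then re-run l0-setup destroy.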

    +
    + + + + + + -
    +
    + + + + + - - - -
    -
    -
    -
    -
    -
    -
    + + + + +
    + + + + + + + \ No newline at end of file diff --git a/docs/setup/install/index.html b/docs/setup/install/index.html index 214075a76..b40180d0a 100644 --- a/docs/setup/install/index.html +++ b/docs/setup/install/index.html @@ -1,16 +1,16 @@ + + + + - - - - - + - - + + + - Install - Layer0 - + @@ -18,476 +18,701 @@ + + + + + + + + + + + + + + + + + - - - - - - - + + Install - Layer0 + - - - - + + + + - + + + + - - - + - - - - - - - - -
    -
    -
    - - - -
    -
    + +
    + + + +
    +
    + + +
    +
    +
    + - - -
      + +
    + +
    +
    +
    - - -
    -
    - + + -

    Create a new Layer0 Instance#

    +
    +
    + + + + + +

    Create a new Layer0 Instance#

    Prerequisites#

    Before you can install and configure Layer0, you must obtain the following:

    • -

      An AWS account.

      +

      Access to an AWS account

    • -

      An EC2 Key Pair. +

An EC2 Key Pair. This key pair allows you to access the EC2 instances running your Services using SSH. If you have already created a key pair, you can use it for this process. -Otherwise, follow the instructions at aws.amazon.com to create a new key pair. +Otherwise, follow the AWS documentation to create a new key pair. Make a note of the name that you selected when creating the key pair; a command-line sketch for creating one appears just below.

    • -

      Terraform v0.9.4+ +

Terraform v0.11+. We use Terraform to create the resources that Layer0 needs. If you're unfamiliar with Terraform, you may want to check out our introduction. If you're ready to install Terraform, there are instructions in the Terraform documentation.
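If you prefer the command line to the EC2 console, the key pair mentioned above can also be created with the AWS CLI. This is only a sketch: it assumes the AWS CLI is installed and configured, and the name layer0-key is a placeholder for whatever name you plan to give l0-setup.

# Create a key pair and save the private key locally; keep this file safe.
aws ec2 create-key-pair --key-name layer0-key --query 'KeyMaterial' --output text > layer0-key.pem
chmod 400 layer0-key.pem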

      @@ -496,7 +721,7 @@

Part 1: Download and extract Layer0#

      1. In the Downloads section of the home page, select the appropriate installation file for your operating system. Extract the zip file to a directory on your computer.
      2. -
      3. (Optional) Place the l0 and l0-setup binaries into your system path. +
      4. (Optional) Place the l0 and l0-setup binaries into your system path. For more information about adding directories to your system path, see the following resources:
        • (Windows): How to Edit Your System PATH for Easy Command Line Access in Windows
        • (Linux/macOS): Adding a Directory to the Path
        • @@ -512,55 +737,73 @@

          Part 2: Create an Access Key.

        • -

          Under Security and Identity, click Identity and Access Management.

          +

Click the Services dropdown menu in the upper left portion of the console page, then type IAM in the search box that appears at the top of the page. Click the IAM service result that appears below the text box.

          +
        • +
        • +

          In the left panel, click Groups, and then confirm that you have a group called Administrators.

          +
        • +

      +
      +

      Is the Administrators group missing in your AWS account?

      +

      If the Administrators group does not already exist, complete the following steps:

      +
        +
      • +

        Click Create New Group. Name the new group Administrators, and then click Next Step.

        +
      • +
      • +

        Check the AdministratorAccess policy to attach the Administrator policy to your new group.

      • -

        Click Groups, and then click Administrators.

        Note


        If the Administrators group does not already exist, complete the following steps:

        1. Click Create New Group. Name the new group "Administrators", and then click Next Step.
        2. Click AdministratorAccess to attach the Administrator policy to your new group.
        3. Click Next Step, and then click Create Group.

      +

      Click Next Step, and then click Create Group.

    • +
    +
    +
    1. -

      Click Users.

      +

      In the left panel, click Users.

    2. -

      Click Create New Users and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to Generate an Access Key for each user, and then click Create.

      +

      Click the New User button and enter a unique user name you will use for Layer0. This user name can be used for multiple Layer0 installations. Check the box next to Programmatic access, and then click the Next: Permissions button.

    3. -

      Once your user account has been created, click Download Credentials to save your access key to a CSV file.

      +

Make sure the Add user to group button is highlighted. Find and check the box next to the group Administrators. Click the Next: Review button to continue. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe!

    4. -

      In the Users list, click the user account you just created. Under User Actions, click Add User to Groups.

      +

      Review your choices and then click the Create user button.

    5. -

      Select the group Administrators and click Add to Groups. This will make your newly created user an administrator for your AWS account, so be sure to keep your security credentials safe!

      +

      Once your user account has been created, click the Download .csv button to save your access and secret key to a CSV file.
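If you would rather script this setup, the console steps above have a rough AWS CLI equivalent. Treat the following as an illustrative sketch only: layer0-bot is an example user name, and it assumes the Administrators group described earlier already exists with the AdministratorAccess policy attached.

# Create the user, add it to the Administrators group, and generate an access key.
aws iam create-user --user-name layer0-bot
aws iam add-user-to-group --group-name Administrators --user-name layer0-bot
aws iam create-access-key --user-name layer0-bot
# The last command prints the AccessKeyId and SecretAccessKey; record them somewhere safe.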

    Part 3: Create a new Layer0 Instance#

    Now that you have downloaded Layer0 and configured your AWS account, you can create your Layer0 instance. From a command prompt, run the following (replacing <instance_name> with a name for your Layer0 instance):

    -
    $ l0-setup init <instance_name>
    +
    l0-setup init <instance_name>
    +

This command will prompt you for many different inputs. Enter the required values for AWS Access Key, AWS Secret Key, and AWS SSH Key as they come up. All remaining inputs are optional and can be left at their default values by pressing enter.

    -
    ...
    +
    ...
     AWS Access Key: The access_key input variable is used to provision the AWS resources
     required for Layer0. This corresponds to the Access Key ID portion of an AWS Access Key.
    -It is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will
    -only use this key for 'l0-setup' commands associated with this Layer0 instance; the
    +It is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will
    +only use this key for 'l0-setup' commands associated with this Layer0 instance; the
     Layer0 API will use its own key with limited permissions to provision AWS resources.
     
     [current: <none>]
    -Please enter a value and press 'enter'.
    +Please enter a value and press 'enter'.
             Input: ABC123xzy
     
     AWS Secret Key: The secret_key input variable is used to provision the AWS resources
     required for Layer0. This corresponds to the Secret Access Key portion of an AWS Access Key.
    -It is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will
    -only use this key for 'l0-setup' commands associated with this Layer0 instance; the
    +It is recommended this key has the 'AdministratorAccess' policy. Note that Layer0 will
    +only use this key for 'l0-setup' commands associated with this Layer0 instance; the
     Layer0 API will use its own key with limited permissions to provision AWS resources.
     
     [current: <none>]
    -Please enter a value and press 'enter'.
    +Please enter a value and press 'enter'.
             Input: ZXY987cba
     
     AWS SSH Key Pair: The ssh_key_pair input variable specifies the name of the
    @@ -570,22 +813,23 @@ Part 3: Create a new Layer0 Instanc
     that have already been provisioned.
     
     [current: <none>]
    -Please enter a value and press 'enter'.
    +Please enter a value and press 'enter'.
             Input: mySSHKey
    -...
    +...

    -Once the init command has successfully completed, you're ready to actually create the resources needed to use Layer0.
    +Once the init command has successfully completed, you're ready to actually create the resources needed to use Layer0. Run the following command (again, replace <instance_name> with the name you've chosen for your Layer0 instance):

-l0-setup apply <instance_name>
+l0-setup apply <instance_name>

-The first time you run the apply command, it may take around 5 minutes to complete.
+The first time you run the apply command, it may take around 5 minutes to complete. This command is idempotent; it is safe to run multiple times if it fails the first.

-It's a good idea to run the push command (l0-setup push <instance_name>) after apply commands complete.
-This will send a backup of your Layer0 instance's configuration and state to S3.
-These files can be grabbed later using the pull command (l0-setup pull <instance_name>).
+At the end of the apply command, your Layer0 instance's configuration and state will be automatically backed up to an S3 bucket.
+You can manually back up your configuration at any time using the push command. It's a good idea to run this command regularly (l0-setup push <instance_name>) to ensure that your configuration is backed up.
+These files can be downloaded at any time using the pull command (l0-setup pull <instance_name>).
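    For instance, a routine backup/restore cycle built from the push and pull subcommands described above might look like the following (a minimal sketch; demo is a hypothetical instance name):

    l0-setup push demo     # upload this instance's configuration and state to its S3 bucket
    l0-setup pull demo     # later, download that configuration again (for example, on another workstation)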

    Using a Private Docker Registry

    The procedures in this section are optional, but are highly recommended for production use.

    @@ -593,28 +837,29 @@ Part 3: Create a new Layer0 Instanc

    If you don't have a config file yet, you can generate one by running docker login [registry-address]. A configuration file will be generated at ~/.docker/config.json.

    To add this authentication to your Layer0 instance, run:

-$ l0-setup init --docker-path=<path/to/config.json> <instance_name>
+l0-setup init --docker-path=<path/to/config.json> <instance_name>

-This will add a rendered file into your Layer0 instance's directory at ~/.layer0/<instance_name>/dockercfg.json.
+This will reconfigure your Layer0 configuration and add a rendered file into your Layer0 instance's directory at ~/.layer0/<instance_name>/dockercfg.json.

 You can modify a Layer0 instance's dockercfg.json file and re-run the apply command (l0-setup apply <instance_name>) to make changes to your authentication.
-Note that any EC2 instances created prior to changing your dockercfg.json file will need to be manually terminated since they only grab the authentication file during instance creation.
+Note: Any EC2 instances created prior to changing your dockercfg.json file will need to be manually terminated since they only grab the authentication file during instance creation. Terminated EC2 instances will be automatically re-created by autoscaling.
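    Putting the pieces above together, a private-registry setup might look like the following sketch (registry.example.com, the config path, and the instance name demo are placeholders, not values from this guide):

    docker login registry.example.com                        # writes ~/.docker/config.json
    l0-setup init --docker-path=~/.docker/config.json demo   # renders dockercfg.json for the instance
    l0-setup apply demo                                      # applies the updated authentication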

    Using an Existing VPC

-The procedures in this section must be followed to properly install Layer0 into an existing VPC
+The procedures in this section must be followed precisely to properly install Layer0 into an existing VPC

 By default, l0-setup creates a new VPC to place resources.
-However, l0-setup can place resources in an existing VPC if it meets the following conditions:
+However, l0-setup can place resources in an existing VPC if the VPC meets all of the following conditions:

    • Has access to the public internet (through a NAT instance or gateway)
    • Has at least 1 public and 1 private subnet
    • The public and private subnets have the tag Tier: Public or Tier: Private, respectively. For information on how to tag AWS resources, please visit the AWS documentation. (A brief tagging sketch follows this list.)
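    For example, assuming the AWS CLI is configured and subnet-aaa / subnet-bbb stand in for your real subnet IDs, the expected tags could be applied like this (a sketch only; the AWS documentation referenced above remains the authoritative guide):

    aws ec2 create-tags --resources subnet-aaa --tags Key=Tier,Value=Public    # tag the public subnet
    aws ec2 create-tags --resources subnet-bbb --tags Key=Tier,Value=Private   # tag the private subnet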
-Once you are sure the existing VPC satisfies these requirements, run the init command,
+Once you are sure the existing VPC satisfies these requirements, run the init command, placing the VPC ID when prompted:

-$ l0-setup init <instance_name>
+l0-setup init <instance_name>
     ...
     VPC ID (optional): The vpc_id input variable specifies an existing AWS VPC to provision
     the AWS resources required for Layer0. If no input is specified, a new VPC will be
    @@ -627,101 +872,109 @@ Part 3: Create a new Layer0 Instanc
     Note that changing this value will destroy and recreate any existing resources.
     
     [current: ]
    -Please enter a new value, or press 'enter' to keep the current value.
    -        Input: vpc123
    +Please enter a new value, or press 'enter' to keep the current value.
    +        Input: vpc123
-Once the command has completed, it is safe to run apply to provision the resources.
+Once the command has completed, it is safe to run apply to provision the resources.

    Part 4: Connect to a Layer0 Instance#

-Once the apply command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the endpoint command.
+Once the apply command has run successfully, you can configure the environment variables needed to connect to the Layer0 API using the endpoint command.

-$ l0-setup endpoint --insecure <instance_name>
+l0-setup endpoint --insecure <instance_name>
 export LAYER0_API_ENDPOINT="https://l0-instance_name-api-123456.us-west-2.elb.amazonaws.com"
 export LAYER0_AUTH_TOKEN="abcDEFG123"
 export LAYER0_SKIP_SSL_VERIFY="1"
-export LAYER0_SKIP_VERSION_VERIFY="1"
+export LAYER0_SKIP_VERSION_VERIFY="1"
-The --insecure flag shows configurations that bypass SSL and version verifications.
-This is required as the Layer0 API created uses a self-signed certificate by default.
+Danger
+
+The --insecure flag shows configurations that bypass SSL and version verifications.
+This is required as the Layer0 API created uses a self-signed SSL certificate by default. These settings are not recommended for production use!

-The endpoint command supports a --syntax option, which can be used to turn configuration into a single line:
+The endpoint command supports a --syntax option, which can be used to turn configuration into a single line:

-• Bash (default) - $ eval "$(l0-setup endpoint --insecure <instance_name>)"
-• Powershell - $ l0-setup endpoint --insecure --syntax=powershell <instance_name> | Out-String | Invoke-Expression
+• Bash (default) - eval "$(l0-setup endpoint --insecure <instance_name>)"
+• Powershell - l0-setup endpoint --insecure --syntax=powershell <instance_name> | Out-String | Invoke-Expression
diff --git a/docs/setup/upgrade/index.html b/docs/setup/upgrade/index.html
index f4424c96a..0e7b5e7cf 100644
--- a/docs/setup/upgrade/index.html
+++ b/docs/setup/upgrade/index.html
    Upgrade a Layer0 Instance#

    This section provides procedures for upgrading your Layer0 installation to the latest version. This assumes you are using Layer0 version v0.10.0 or later.

-Note
+Warning

 Layer0 does not support updating MAJOR or MINOR versions in place unless explicitly stated otherwise.
-Users will need to destroy and re-create Layer0 instances in these circumstances.
+Users will either need to create a new Layer0 instance and migrate to it or destroy and re-create their Layer0 instance in these circumstances.

 Run the upgrade command, replacing <instance_name> and <version> with the name of the Layer0 instance and new version, respectively:

-$ l0-setup upgrade <instance_name> <version>
+l0-setup upgrade <instance_name> <version>

 This will prompt you about the updated source and version inputs changing. If you are not satisfied with the changes, exit the application during the prompts.
-For full control on changing inputs, please use the set command.
+For full control on changing inputs, use the set command.

    Example Usage

-$ l0-setup upgrade mylayer0 v0.10.1
+l0-setup upgrade mylayer0 v0.10.1

-This will update the 'version' input
+This will update the 'version' input
             From: [v0.10.0]
             To:   [v0.10.1]
     
    -        Press 'enter' to accept this change:
    -This will update the 'source' input
    +        Press 'enter' to accept this change:
    +This will update the 'source' input
             From: [github.com/quintilesims/layer0//setup/module?ref=v0.10.0]
             To:   [github.com/quintilesims/layer0//setup/module?ref=v0.10.1]
     
    -        Press 'enter' to accept this change:
    +        Press 'enter' to accept this change:
             ...
     
    -Everything looks good! You are now ready to run 'l0-setup apply mylayer0'
+Everything looks good! You are now ready to run 'l0-setup apply mylayer0'
-As stated by the command output, run the apply command to apply the changes to the Layer0 instance.
+As stated by the command output, run the apply command to apply the changes to the Layer0 instance. If any errors occur, please contact the Layer0 team.
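    Continuing the example, the follow-up is just the subcommands already covered in this guide (mylayer0 matches the example instance above):

    l0-setup apply mylayer0    # apply the upgraded version and source inputs
    l0-setup push mylayer0     # back up the updated configuration and state to S3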

    + + + + + + + \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml index ad9959550..eec1f6429 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -4,7 +4,7 @@ https://github.com/quintilesims/layer0// - 2017-08-03 + 2018-01-12 daily @@ -12,7 +12,7 @@ https://github.com/quintilesims/layer0//releases/ - 2017-08-03 + 2018-01-12 daily @@ -20,7 +20,7 @@ https://github.com/quintilesims/layer0//intro/ - 2017-08-03 + 2018-01-12 daily @@ -29,19 +29,19 @@ https://github.com/quintilesims/layer0//setup/install/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//setup/upgrade/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//setup/destroy/ - 2017-08-03 + 2018-01-12 daily @@ -51,37 +51,25 @@ https://github.com/quintilesims/layer0//guides/walkthrough/intro/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//guides/walkthrough/deployment-1/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//guides/walkthrough/deployment-2/ - 2017-08-03 - daily - - - - https://github.com/quintilesims/layer0//guides/walkthrough/deployment-3/ - 2017-08-03 - daily - - - - https://github.com/quintilesims/layer0//guides/terraform_beyond_layer0/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//guides/one_off_task/ - 2017-08-03 + 2018-01-12 daily @@ -91,55 +79,55 @@ https://github.com/quintilesims/layer0//reference/cli/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/setup-cli/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/terraform_introduction/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/terraform-plugin/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/updateservice/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/consul/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/task_definition/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/architecture/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//reference/ecr/ - 2017-08-03 + 2018-01-12 daily @@ -149,13 +137,13 @@ https://github.com/quintilesims/layer0//troubleshooting/commonissues/ - 2017-08-03 + 2018-01-12 daily https://github.com/quintilesims/layer0//troubleshooting/ssh/ - 2017-08-03 + 2018-01-12 daily diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 2a527eea4..118c00da0 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,15 +1,20 @@ + +:root { + --theme-color:#e84e40; +} + .article h2 { font-size:18pt; } .article h3 { - color: #e84e40; + color: var(--theme-color); font-size:14pt; font-weight:normal; } .article h4 { - color:#e84e40; + color: var(--theme-color); font-weight:bold; font-style:normal; } @@ -64,10 +69,6 @@ color: #ddd; } -.admonition.note a { - color: #7ff; -} - .article a.note { color:#ffffff; border-bottom:1px dotted; @@ -75,5 +76,71 @@ .article a.note:hover { border-bottom: 1px solid; - color:#e84e40; + color: var(--theme-color); +} + +.md-nav__link:hover { + color: var(--theme-color); +} + +.md-typeset h2[id] .headerlink:focus, .md-typeset h2[id]:hover .headerlink:hover, .md-typeset h2[id]:target .headerlink { + color: var(--theme-color); +} + +/* Change Note Box Colors */ +.md-typeset .admonition, .md-typeset details { + -webkit-box-shadow: 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12), 0 3px 1px 
-2px rgba(0,0,0,.2); + box-shadow: 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12), 0 3px 1px -2px rgba(0,0,0,.2); + position: relative; + margin: 1.5625em 0; + padding: 0 1.2rem; + border-left: .4rem solid var(--theme-color); + border-radius: .2rem; + font-size: 1.28rem; + overflow: auto; +} + +.md-typeset .admonition>.admonition-title, .md-typeset .admonition>summary, .md-typeset details>.admonition-title, .md-typeset details>summary { + margin: 0 -1.2rem; + padding: .8rem 1.2rem .8rem 4rem; + border-bottom: .1rem solid rgba(68,138,255,.1); + background-color: #e84e404d; + font-weight: 700; +} + +.md-typeset .admonition>.admonition-title:before, .md-typeset .admonition>summary:before, .md-typeset details>.admonition-title:before, .md-typeset details>summary:before { + position: absolute; + left: 1.2rem; + color: var(--theme-color); + font-size: 2rem; + content: "\E3C9"; +} + +/* Code Block */ +.md-typeset code, .md-typeset pre { + background-color: rgba(237, 237, 237, 0.226); + color: #0a1c25; + font-size: 85%; +} + +/* Logo Header */ +.md-header-nav__button.md-logo img { + width: 42px; + height: 42px; +} + +a.md-content__icon, .md-footer-nav__button, .md-header-nav__button.md-logo, .md-nav__button, .md-search-result__article--document:before { + display: block; + margin: .4rem; + padding: 0.2rem; + font-size: 2.4rem; + cursor: pointer; +} + +/* Title Size */ +.md-header-nav__title { + padding: 0; + display: block; + font-size: 2.0rem; + line-height: 4.8rem; } diff --git a/docs/troubleshooting/commonissues/index.html b/docs/troubleshooting/commonissues/index.html index 278c27adc..be04fbafe 100644 --- a/docs/troubleshooting/commonissues/index.html +++ b/docs/troubleshooting/commonissues/index.html @@ -1,16 +1,16 @@ + + + + - - - - - + - - + + + - Common Issues - Layer0 - + @@ -18,452 +18,689 @@ + + + + + + + + + + + + + + + + + - - - - - - - + + Common Issues - Layer0 + - - - - + + + + - + + + + - - - + - - - - - - - - -
    Common issues and their solutions#

    "Connection refused" error when executing Layer0 commands#

-When executing commands using the Layer0 CLI, you may see the following error message: "Get http://localhost:9090/command/: dial tcp 127.0.0.1:9090: connection refused", where command is the Layer0 command you are trying to execute.
-
-This error indicates that your Layer0 environment variables have not been set for the current session. See the "Configure environment variables" section of the Layer0 installation guide for instructions for setting up your environment variables.
+When executing commands using the Layer0 CLI, you may see the following error message:
+
+Get http://localhost:9090/command/: dial tcp 127.0.0.1:9090: connection refused
+
+Where command is the Layer0 command you are trying to execute.
+
+This error indicates that your Layer0 environment variables have not been set for the current session. See the "Connect to a Layer0 Instance" section of the Layer0 installation guide for instructions for setting up your environment variables.
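    In practice, the quickest fix is usually to re-export the variables in the current shell using the endpoint command covered in the install guide (a sketch; <instance_name> is your Layer0 instance, and the --insecure flag carries the caveats noted there):

    eval "$(l0-setup endpoint --insecure <instance_name>)"   # sets LAYER0_API_ENDPOINT, LAYER0_AUTH_TOKEN, etc.
    l0 <command>                                             # re-run the command that previously failed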


    "Invalid Dockerrun.aws.json" error when creating a deploy#

    Byte Order Marks (BOM) in Dockerrun file#

    @@ -473,31 +710,32 @@ Byte Order Marks (BOM) in Docker
     • At the command line, type the following to remove the BOM:

    -  • (Linux/OS X) tail -c +4 DockerrunFile > DockerrunFileNew
    -    Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.
    +  • (Linux/OS X)
    +
    +    tail -c +4 DockerrunFile > DockerrunFileNew
    +
    +    Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.

     • Alternatively, you can use the dos2unix file converter to remove the BOM from your Dockerrun files. Dos2unix is available for Windows, Linux and Mac OS.

       To remove the BOM using dos2unix:

    -  • At the command line, type the following:
    -    • dos2unix --remove-bom -n DockerrunFile DockerrunFileNew
    -      Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.
    +  • At the command line, type the following:
    +
    +    dos2unix --remove-bom -n DockerrunFile DockerrunFileNew
    +
    +    Replace DockerrunFile with the path to your Dockerrun file, and DockerrunFileNew with a new name for the Dockerrun file without the BOM.
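    If you want to confirm that a Dockerrun file actually begins with a BOM before stripping bytes from it, a quick check such as the following can help (not part of the original steps; EF BB BF is the standard UTF-8 BOM byte sequence):

    head -c 3 DockerrunFile | od -An -tx1    # prints "ef bb bf" when a UTF-8 BOM is present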


    "AWS Error: the key pair '' does not exist (code 'ValidationError')" with l0-setup#

-This occurs when you pass a non-existent EC2 keypair to l0-setup. To fix this, follow the instructions for creating an EC2 Key Pair.
+This occurs when you pass an invalid EC2 keypair to l0-setup. To fix this, follow the instructions for creating an EC2 Key Pair.

-• After you've created a new EC2 Key Pair, run the following command:
-  l0-setup plan prefix -var key_pair=keypair
+• After you've created a new EC2 Key Pair, use l0-setup init to reconfigure your instance:
+
+  l0-setup init --aws-ssh-key-pair keypair
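    As a concrete illustration (a sketch only; my-layer0-key and the output path are hypothetical, and the AWS CLI must already be configured), a replacement key pair could be created and then supplied to l0-setup exactly as in the command above:

    aws ec2 create-key-pair --key-name my-layer0-key --query 'KeyMaterial' --output text > ~/.ssh/my-layer0-key.pem
    chmod 400 ~/.ssh/my-layer0-key.pem                  # restrict permissions on the private key
    l0-setup init --aws-ssh-key-pair my-layer0-key      # reconfigure the instance with the new key pair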
diff --git a/docs/troubleshooting/ssh/index.html b/docs/troubleshooting/ssh/index.html
index f0a2c5118..107102cf1 100644
--- a/docs/troubleshooting/ssh/index.html
+++ b/docs/troubleshooting/ssh/index.html
    Secure Shell (SSH)#

    You can use Secure Shell (SSH) to access your Layer0 environment(s).

    By default, Layer0 Setup asks for an EC2 key pair when creating a new Layer0. This key pair is associated with all machines that host your Layer0 Services. This means you can use SSH to log into the underlying Docker host to perform tasks such as troubleshooting failing containers or viewing logs. For information about creating an EC2 key pair, see Install and Configure Layer0.

    @@ -450,85 +612,93 @@ To SSH into a Service

    -l0 loadbalancer addport <name> 2222:22/tcp
    +l0 loadbalancer addport <name> 2222:22/tcp

    2. SSH into your Service by supplying the load balancer url and key pair file name.

    -ssh -i <key pair path and file name> ec2-user@<load balancer url> -p 2222
    +ssh -i <key pair path and file name> ec2-user@<load balancer url> -p 2222

    3. If required, Use Docker to access a specific container with Bash.

    -docker exec -it <container id> /bin/bash
    +docker exec -it <container id> /bin/bash
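    To find the <container id> used in the last step, it can help to list the containers running on that host first (standard Docker usage rather than anything Layer0-specific):

    docker ps                                  # note the CONTAINER ID of the service you want to inspect
    docker exec -it <container id> /bin/bash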

    Remarks#

    You can get the load balancer url from the Load Balancers section of your Layer0 AWS console.

-Use the loadbalancer dropport subcommand to remove a port configuration from an existing Layer0 load balancer.
+Use the l0 loadbalancer dropport subcommand to remove a port configuration from an existing Layer0 load balancer.

    You cannot change the key pair after a Layer0 has been created. If you lose your key pair or need to generate a new one, you will need to create a new Layer0.

    If your Service is behind a private load balancer, or none at all, you can either re-create your Service behind a public load balancer, use an existing public load balancer as a "jump" point, or create a new Layer0 Service behind a public load balancer to serve as a "jump" point.

    - - -

    +
    + + + + + - - - - - - -
    -
    -
    -
    -
    -
    -
    -
    - - - + + + + + + + \ No newline at end of file diff --git a/scripts/flow.sh b/scripts/flow.sh index 5ff45602f..988a1f4ea 100755 --- a/scripts/flow.sh +++ b/scripts/flow.sh @@ -15,7 +15,7 @@ update_api() { popd pushd $LAYER0_PATH/setup - go run main.go set "$LAYER0_PREFIX" --input version="$GIT_HASH" + go run main.go set "$LAYER0_PREFIX" --input layer0_version="$GIT_HASH" popd } @@ -27,7 +27,7 @@ update_runner() { popd pushd $LAYER0_PATH/setup - go run main.go set "$LAYER0_PREFIX" --input version="$GIT_HASH" + go run main.go set "$LAYER0_PREFIX" --input layer0_version="$GIT_HASH" popd } diff --git a/setup/command/apply.go b/setup/command/apply.go index cb6ac14fb..3e1d3f49e 100644 --- a/setup/command/apply.go +++ b/setup/command/apply.go @@ -3,6 +3,7 @@ package command import ( "fmt" + "github.com/quintilesims/layer0/setup/instance" "github.com/urfave/cli" ) @@ -27,18 +28,23 @@ func (f *CommandFactory) Apply() cli.Command { return err } - instance := f.NewInstance(args["NAME"]) - if err := instance.Apply(!c.Bool("quick")); err != nil { + inst := f.NewInstance(args["NAME"]) + if err := inst.Apply(!c.Bool("quick")); err != nil { return err } if c.Bool("push") { - provider, err := f.newAWSProviderHelper(c) + region, err := inst.Output(instance.OUTPUT_AWS_REGION) if err != nil { return err } - if err := instance.Push(provider.S3); err != nil { + provider, err := f.newAWSProviderHelper(c, region) + if err != nil { + return err + } + + if err := inst.Push(provider.S3); err != nil { return err } } diff --git a/setup/command/aws.go b/setup/command/aws.go index 793d92fb4..6458eb4dd 100644 --- a/setup/command/aws.go +++ b/setup/command/aws.go @@ -23,15 +23,10 @@ var awsFlags = []cli.Flag{ Usage: "secret key portion on an AWS key", EnvVar: config.AWS_SECRET_ACCESS_KEY, }, - cli.StringFlag{ - Name: "aws-region", - Usage: "AWS region", - EnvVar: config.AWS_REGION, - }, } -func (f *CommandFactory) newAWSProviderHelper(c *cli.Context) (*aws.Provider, error) { - // use default credentials and region settings +func (f *CommandFactory) newAWSProviderHelper(c *cli.Context, region string) (*aws.Provider, error) { + // first grab default config settings config := defaults.Get().Config // use static credentials if passed in by the user @@ -48,20 +43,17 @@ func (f *CommandFactory) newAWSProviderHelper(c *cli.Context) (*aws.Provider, er if _, err := config.Credentials.Get(); err != nil { if err, ok := err.(awserr.Error); ok && err.Code() == "NoCredentialProviders" { text := "No valid AWS credentials found. Please specify an AWS access key and secret key using " - text += "their corresponding flags or environment variables" + text += "their corresponding flags or environment variables." + text += "l0-setup init --aws-access-key --aws-secret-key " return nil, fmt.Errorf(text) } return nil, err } - // use region if passed in by the user - config.WithRegion(aws.DEFAULT_AWS_REGION) - if region := c.String("aws-region"); region != "" { - config.WithRegion(region) - } else { - logrus.Debugf("aws-region was not specified. 
Using default") - } + // ensure that the correct region is set for AWS services + // that have region-specific operations + config.WithRegion(region) return f.NewAWSProvider(config), nil } diff --git a/setup/command/list.go b/setup/command/list.go index 39f4a123e..9338cd8a8 100644 --- a/setup/command/list.go +++ b/setup/command/list.go @@ -4,6 +4,7 @@ import ( "fmt" "sort" + "github.com/quintilesims/layer0/setup/aws" "github.com/quintilesims/layer0/setup/instance" "github.com/urfave/cli" ) @@ -58,7 +59,11 @@ func (f *CommandFactory) List() cli.Command { } func (f *CommandFactory) addRemoteInstances(c *cli.Context, current map[string]status) error { - provider, err := f.newAWSProviderHelper(c) + // The default AWS region is passed here (as opposed to other l0-setup + // operations) because listing buckets from S3 is a region-agnostic + // operation. All S3 buckets in the AWS account will be retrieved + // regardless of what region is provided. + provider, err := f.newAWSProviderHelper(c, aws.DEFAULT_AWS_REGION) if err != nil { return err } diff --git a/setup/command/pull.go b/setup/command/pull.go index 0dd00367d..94b16189a 100644 --- a/setup/command/pull.go +++ b/setup/command/pull.go @@ -2,7 +2,12 @@ package command import ( "fmt" + "strings" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + aws_provider "github.com/quintilesims/layer0/setup/aws" "github.com/urfave/cli" ) @@ -18,7 +23,31 @@ func (f *CommandFactory) Pull() cli.Command { return err } - provider, err := f.newAWSProviderHelper(c) + // Use the default AWS region first to retrieve the list of buckets + provider, err := f.newAWSProviderHelper(c, aws_provider.DEFAULT_AWS_REGION) + if err != nil { + return err + } + + remoteInstanceBucket, err := getRemoteInstanceBucket(provider.S3, args["NAME"]) + if err != nil { + return err + } + + region, err := getBucketLocation(provider.S3, remoteInstanceBucket) + if err != nil { + return err + } + + if region == "" { + // See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html + // When GetBucketLocation returns an empty string, the bucket is in us-east-1 + region = "us-east-1" + } + + // Change the AWS provider configuration to match the region of the bucket + // to pull from + provider, err = f.newAWSProviderHelper(c, region) if err != nil { return err } @@ -33,3 +62,32 @@ func (f *CommandFactory) Pull() cli.Command { }, } } + +func getRemoteInstanceBucket(s s3iface.S3API, instanceName string) (string, error) { + listBucketsOutput, err := s.ListBuckets(&s3.ListBucketsInput{}) + if err != nil { + return "", err + } + + for _, bucket := range listBucketsOutput.Buckets { + bucketName := aws.StringValue(bucket.Name) + + if split := strings.Split(bucketName, "-"); len(split) == 3 && split[0] == "layer0" && split[1] == instanceName { + return bucketName, nil + } + } + + return "", fmt.Errorf("No S3 bucket found for given instance name") +} + +func getBucketLocation(s s3iface.S3API, bucketName string) (string, error) { + getBucketLocationInput := &s3.GetBucketLocationInput{} + getBucketLocationInput.SetBucket(bucketName) + + getBucketLocationOutput, err := s.GetBucketLocation(getBucketLocationInput) + if err != nil { + return "", err + } + + return aws.StringValue(getBucketLocationOutput.LocationConstraint), nil +} diff --git a/setup/command/push.go b/setup/command/push.go index 6e00c1669..e0c91fcb2 100644 --- a/setup/command/push.go +++ b/setup/command/push.go @@ -3,6 +3,7 @@ package command 
import ( "fmt" + "github.com/quintilesims/layer0/setup/instance" "github.com/urfave/cli" ) @@ -18,13 +19,18 @@ func (f *CommandFactory) Push() cli.Command { return err } - provider, err := f.newAWSProviderHelper(c) + inst := f.NewInstance(args["NAME"]) + region, err := inst.Output(instance.OUTPUT_AWS_REGION) if err != nil { return err } - instance := f.NewInstance(args["NAME"]) - if err := instance.Push(provider.S3); err != nil { + provider, err := f.newAWSProviderHelper(c, region) + if err != nil { + return err + } + + if err := inst.Push(provider.S3); err != nil { return err } diff --git a/tests/README.md b/tests/README.md index f13f18787..2b5b48fca 100644 --- a/tests/README.md +++ b/tests/README.md @@ -53,6 +53,11 @@ This is a simple web service whose behavior can be changed through an API. Checkout the `TestDeadServiceRecreated` test to see how to use the STS client in a test. +### Running the Tests +To run the system and stress tests, it is required you have the [Layer0 Terraform Plugin](http://layer0.ims.io/reference/terraform-plugin/) in your system's `PATH` variable. +From the `test/system` or `test/stress` directory, run `make test` to initialize and execute all of the tests. +For more explicit behavior, see the **Test Flags** section below. + ### Test Flags In addition to the standard `go test` flags, the following have been implemented for system tests: diff --git a/tests/clients/layer0_test_client.go b/tests/clients/layer0_test_client.go index 188a071ca..4e3ba81ed 100644 --- a/tests/clients/layer0_test_client.go +++ b/tests/clients/layer0_test_client.go @@ -1,18 +1,20 @@ package clients import ( - "testing" - "github.com/quintilesims/layer0/cli/client" "github.com/quintilesims/layer0/common/models" ) +type Tester interface { + Fatal(...interface{}) +} + type Layer0TestClient struct { - T *testing.T + T Tester Client *client.APIClient } -func NewLayer0TestClient(t *testing.T, endpoint, token string) *Layer0TestClient { +func NewLayer0TestClient(t Tester, endpoint, token string) *Layer0TestClient { return &Layer0TestClient{ T: t, Client: client.NewAPIClient(client.Config{ @@ -22,8 +24,8 @@ func NewLayer0TestClient(t *testing.T, endpoint, token string) *Layer0TestClient } } -func (l *Layer0TestClient) CreateTask(taskName, environmentID, deployID string, copies int, overrides []models.ContainerOverride) string { - jobID, err := l.Client.CreateTask(taskName, environmentID, deployID, copies, overrides) +func (l *Layer0TestClient) CreateTask(taskName, environmentID, deployID string, overrides []models.ContainerOverride) string { + jobID, err := l.Client.CreateTask(taskName, environmentID, deployID, overrides) if err != nil { l.T.Fatal(err) } @@ -95,6 +97,15 @@ func (l *Layer0TestClient) GetTask(id string) *models.Task { return task } +func (l *Layer0TestClient) GetDeploy(id string) *models.Deploy { + deploy, err := l.Client.GetDeploy(id) + if err != nil { + l.T.Fatal(err) + } + + return deploy +} + func (l *Layer0TestClient) GetEnvironment(id string) *models.Environment { environment, err := l.Client.GetEnvironment(id) if err != nil { @@ -104,6 +115,51 @@ func (l *Layer0TestClient) GetEnvironment(id string) *models.Environment { return environment } +func (l *Layer0TestClient) GetLoadBalancer(id string) *models.LoadBalancer { + loadBalancer, err := l.Client.GetLoadBalancer(id) + if err != nil { + l.T.Fatal(err) + } + + return loadBalancer +} + +func (l *Layer0TestClient) ListDeploys() []*models.DeploySummary { + deploys, err := l.Client.ListDeploys() + if err != nil { + l.T.Fatal(err) 
+ } + + return deploys +} + +func (l *Layer0TestClient) ListEnvironments() []*models.EnvironmentSummary { + environments, err := l.Client.ListEnvironments() + if err != nil { + l.T.Fatal(err) + } + + return environments +} + +func (l *Layer0TestClient) ListLoadBalancers() []*models.LoadBalancerSummary { + loadBalancers, err := l.Client.ListLoadBalancers() + if err != nil { + l.T.Fatal(err) + } + + return loadBalancers +} + +func (l *Layer0TestClient) ListServices() []*models.ServiceSummary { + services, err := l.Client.ListServices() + if err != nil { + l.T.Fatal(err) + } + + return services +} + func (l *Layer0TestClient) ListTasks() []*models.TaskSummary { tasks, err := l.Client.ListTasks() if err != nil { diff --git a/tests/smoke/README.md b/tests/smoke/README.md index cc6d801a7..fec4501c8 100644 --- a/tests/smoke/README.md +++ b/tests/smoke/README.md @@ -8,8 +8,12 @@ Environment Variables must be populated with the contents of `l0-setup endpoint #### Running +Make sure you have cleared out any existing environments, services, etc. from your Layer0 instance before running the test. +You can use `flow delete` to accomplish this. + From the `layer0` directory, run `make smoketest` -From the `layer0/tests/smoke` directory, run `make test` + +Or, from the `layer0/tests/smoke` directory, run `make test` #### Tips and Tricks diff --git a/tests/smoke/admin.bats b/tests/smoke/admin.bats index f5622f099..b37c5accf 100644 --- a/tests/smoke/admin.bats +++ b/tests/smoke/admin.bats @@ -11,7 +11,3 @@ @test "admin debug" { l0 admin debug } - -@test "admin scale api" { - l0 admin scale api -} diff --git a/tests/stress/Makefile b/tests/stress/Makefile index e395e4a33..f9d663ceb 100644 --- a/tests/stress/Makefile +++ b/tests/stress/Makefile @@ -1,7 +1,10 @@ -test: - go test -v -parallel 10 -timeout 1h +init: + pushd module ; \ + ln -s -f $$(which terraform-provider-layer0) . ; \ + terraform init ; \ + popd ; \ -benchmark: - go test -v -run NOTHING -bench . -timeout 2h +test: init + go test -v -debug -run NOTHING -bench . -timeout 2h -.PHONY: test benchmark +.PHONY: init test diff --git a/tests/stress/main_test.go b/tests/stress/main_test.go index ec91191a7..df5b3134f 100644 --- a/tests/stress/main_test.go +++ b/tests/stress/main_test.go @@ -49,9 +49,6 @@ func teardown() { fmt.Println("Error occurred during teardown: ", err) os.Exit(1) } - fmt.Println("Error occurred during teardown: ", err) - os.Exit(1) - } } } diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go index 4dc34f696..3ef1ca005 100644 --- a/tests/stress/stress_test.go +++ b/tests/stress/stress_test.go @@ -71,7 +71,6 @@ func runTest(b *testing.B, c StressTestCase) { tftest.Dir("module"), tftest.Vars(vars), tftest.DryRun(*dry), - tftest.Log(b), ) layer0 := clients.NewLayer0TestClient(b, vars["endpoint"], vars["token"]) diff --git a/tests/system/Makefile b/tests/system/Makefile index 8c5870957..19ebeeda4 100644 --- a/tests/system/Makefile +++ b/tests/system/Makefile @@ -1,7 +1,17 @@ -test: - go test -v -short -parallel 5 -timeout 15m +init: + for case in cases/*; do \ + if [ -d "$$case" ] && [ "$$case" != "cases/modules" ]; then \ + pushd "$$case" ; \ + ln -s -f $$(which terraform-provider-layer0) . 
; \ + terraform init ; \ + popd ; \ + fi \ + done -test-all: +test: init go test -v -parallel 10 -timeout 1h -.PHONY: test test-all +test-short: init + go test -v -short -parallel 5 -timeout 15m + +.PHONY: init test test-short diff --git a/tests/system/cases/datasources/outputs.tf b/tests/system/cases/datasources/outputs.tf index 9a6905f13..78b59791d 100644 --- a/tests/system/cases/datasources/outputs.tf +++ b/tests/system/cases/datasources/outputs.tf @@ -105,7 +105,7 @@ output "load_balancer_private" { } output "load_balancer_private_expected" { - value = "${layer0_load_balancer.datasources.private}" + value = "false" } output "load_balancer_url" { diff --git a/tests/system/system_test.go b/tests/system/system_test.go index 464aea62b..d971131e8 100644 --- a/tests/system/system_test.go +++ b/tests/system/system_test.go @@ -24,20 +24,14 @@ func NewSystemTest(t *testing.T, dir string, vars map[string]string) *SystemTest tfContext := tftest.NewTestContext(t, tftest.Dir(dir), tftest.Vars(vars), - tftest.DryRun(*dry), - tftest.Log(log)) + tftest.DryRun(*dry)) layer0 := clients.NewLayer0TestClient(t, vars["endpoint"], vars["token"]) - // download modules using terraform get if _, err := tfContext.Terraformf("init"); err != nil { t.Fatal(err) } - if _, err := tfContext.Terraformf("get"); err != nil { - t.Fatal(err) - } - return &SystemTest{ Terraform: tfContext, Layer0: layer0, diff --git a/vendor/github.com/quintilesims/tftest/context.go b/vendor/github.com/quintilesims/tftest/context.go index d23365fca..01225662b 100644 --- a/vendor/github.com/quintilesims/tftest/context.go +++ b/vendor/github.com/quintilesims/tftest/context.go @@ -2,13 +2,12 @@ package tftest import ( "fmt" - "os" + "log" "os/exec" "strings" ) type Context struct { - Logger Logger Vars map[string]string dir string dryRun bool @@ -16,9 +15,8 @@ type Context struct { func NewContext(options ...ContextOption) *Context { context := &Context{ - Logger: NewIOLogger(os.Stdout), - Vars: map[string]string{}, - dir: ".", + Vars: map[string]string{}, + dir: ".", } for _, option := range options { @@ -36,12 +34,16 @@ func (c *Context) Dir() string { return c.dir } +func (c *Context) Init() ([]byte, error) { + return c.Terraformf("init") +} + func (c *Context) Apply() ([]byte, error) { if c.dryRun { return c.Terraformf("plan") } - return c.Terraformf("apply") + return c.Terraformf("apply", "-auto-approve") } func (c *Context) Destroy() ([]byte, error) { @@ -78,16 +80,13 @@ func (c *Context) Terraformf(command string, args ...string) ([]byte, error) { cmd.Env = env cmd.Dir = c.dir - c.Logger.Logf("Running %v from %s", cmd.Args, cmd.Dir) + log.Printf("[DEBUG] Running %v from %s", cmd.Args, cmd.Dir) output, err := cmd.CombinedOutput() if err != nil { - text := fmt.Sprintf("Error running %v from %s: %v\n", cmd.Args, cmd.Dir, err) - for _, line := range strings.Split(string(output), "\n") { - text += line + "\n" - } - - return nil, fmt.Errorf(text) + text := fmt.Sprintf("%v\n%v", string(output), err) + log.Printf("[ERROR] Error running %v from %s: %v\n", cmd.Args, cmd.Dir, text) + return nil, err } return output, nil diff --git a/vendor/github.com/quintilesims/tftest/logger.go b/vendor/github.com/quintilesims/tftest/logger.go deleted file mode 100644 index e0067e270..000000000 --- a/vendor/github.com/quintilesims/tftest/logger.go +++ /dev/null @@ -1,27 +0,0 @@ -package tftest - -import ( - "fmt" - "io" -) - -type Logger interface { - Log(args ...interface{}) - Logf(format string, args ...interface{}) -} - -type IOLogger struct { - writer 
io.Writer -} - -func NewIOLogger(w io.Writer) *IOLogger { - return &IOLogger{writer: w} -} - -func (i *IOLogger) Log(args ...interface{}) { - fmt.Fprint(i.writer, args...) -} - -func (i *IOLogger) Logf(format string, args ...interface{}) { - fmt.Fprintf(i.writer, format, args...) -} diff --git a/vendor/github.com/quintilesims/tftest/options.go b/vendor/github.com/quintilesims/tftest/options.go index 439c0f4a0..153c0d127 100644 --- a/vendor/github.com/quintilesims/tftest/options.go +++ b/vendor/github.com/quintilesims/tftest/options.go @@ -2,12 +2,6 @@ package tftest type ContextOption func(*Context) -func Log(logger Logger) ContextOption { - return func(c *Context) { - c.Logger = logger - } -} - func Dir(dir string) ContextOption { return func(c *Context) { c.dir = dir diff --git a/vendor/github.com/quintilesims/tftest/test_context.go b/vendor/github.com/quintilesims/tftest/test_context.go index c4905bb6d..b53d560b5 100644 --- a/vendor/github.com/quintilesims/tftest/test_context.go +++ b/vendor/github.com/quintilesims/tftest/test_context.go @@ -17,6 +17,12 @@ func NewTestContext(t Tester, options ...ContextOption) *TestContext { } } +func (c *TestContext) Init() { + if _, err := c.Context.Init(); err != nil { + c.t.Fatal(err) + } +} + func (c *TestContext) Apply() { if _, err := c.Context.Apply(); err != nil { c.t.Fatal(err) diff --git a/vendor/vendor.json b/vendor/vendor.json index c61454059..c334a4038 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -3273,10 +3273,10 @@ "revisionTime": "2017-04-20T21:30:45Z" }, { - "checksumSHA1": "ObB94Sg/UH32j6sGBtG6J2BQmaI=", + "checksumSHA1": "53pGZaKThdchJJZafXHSVYfLZhE=", "path": "github.com/quintilesims/tftest", - "revision": "50e64a47e2216f68f260bbd17f8d8d0f81cbc127", - "revisionTime": "2017-08-23T23:42:00Z" + "revision": "70597d446846d3de7adaabbf000469cd172387b4", + "revisionTime": "2018-01-08T22:19:58Z" }, { "checksumSHA1": "pvg8L4xN+zO9HLJislpVeHPPLjM=",