
docs: finish quickstart page.
Add test that duplicates quickstart user steps.
aszs committed Nov 12, 2024
1 parent b2334f8 commit c9abb35
Showing 8 changed files with 129 additions and 57 deletions.
4 changes: 4 additions & 0 deletions docs/cloudmap.md
@@ -173,3 +173,7 @@ This enables the cloudmap to be used like a package manager to resolve the dependencies
Most code isn't used directly; instead, there is a build or packaging process to create the artifact (for example, an executable binary, a software package, or a container image) that is actually used when an application is deployed. The `artifacts` section of a cloud map lists these artifacts.

Artifacts are declared separately from repositories because there isn't necessarily a way to determine how artifacts are built, but that relationship can be expressed with the `builds` annotation in the `notable` section. In the future, the cloud map schema will be extended to better represent the build processes that build artifacts from code in repositories.
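
To make the `builds` relationship concrete, here's a minimal sketch of resolving an artifact back to the repository that builds it. The dictionary layout and the example names below are hypothetical stand-ins for a parsed cloud map, not the actual schema:

```python
# Hypothetical, simplified in-memory representation of a parsed cloud map.
cloudmap = {
    "artifacts": {
        "docker.io/acme/myapp:latest": {"type": "container-image"},
    },
    "notable": {
        "example.org/acme/myapp": {
            # the `builds` annotation links a repository to the artifacts it produces
            "builds": ["docker.io/acme/myapp:latest"],
        },
    },
}

def find_builder(artifact_name):
    """Return the repository whose `builds` annotation lists this artifact."""
    for repo, notes in cloudmap["notable"].items():
        if artifact_name in notes.get("builds", []):
            return repo
    return None  # not every artifact's build relationship is recorded

print(find_builder("docker.io/acme/myapp:latest"))  # example.org/acme/myapp
```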

## Future directions

Could cloud maps evolve into a package manager for the cloud? See <https://github.com/onecommons/cloudmap/blob/main/README.md> for more on our vision.
21 changes: 13 additions & 8 deletions docs/examples/quickstart_deployment_blueprints.py
@@ -1,15 +1,20 @@
-from tosca_repositories.std.aws import EC2Compute
+import tosca
+from tosca_repositories.std.aws.compute import EC2Compute
 from tosca_repositories.std.aws.db import AwsRdsPostgres
 from tosca_repositories.std import k8s
+from tosca_repositories.std.dns_services import Route53DNSZone
 
 class production(tosca.DeploymentBlueprint):
     _cloud = unfurl.relationships.ConnectsToAWSAccount
 
-    host = std.ContainerComputeHost(
-        host=EC2Compute(disk_size=Inputs.disk_size,
-            num_cpus=2,
-            mem_size=Inputs.mem_size,
-    ))
+    host = std.HttpsProxyContainerComputeHost(
+        host=EC2Compute(
+            disk_size=Inputs.disk_size,
+            num_cpus=2,
+            mem_size=Inputs.mem_size,
+        ),
+        dns=Route53DNSZone(name="example.com"),
+    )
     db = AwsRdsPostgres()
 
 class dev(tosca.DeploymentBlueprint):
@@ -20,5 +25,5 @@ class dev(tosca.DeploymentBlueprint):
         labels={"kompose.volume.size": Inputs.disk_size}
     )
     db = std.PostgresDBInstance(
-        database_name="my_db",
-        host_requirement=k8s.PrivateK8sContainerHost())
+        database_name="my_db", host_requirement=k8s.PrivateK8sContainerHost()
+    )
21 changes: 8 additions & 13 deletions docs/examples/quickstart_service_template.py
@@ -12,20 +12,15 @@ class Inputs(TopologyInputs):
 host = std.ContainerHost()
 
 container = std.ContainerService(
-    environment=unfurl.datatypes.EnvironmentVariables(
-        DBASE=db.url,
-        URL= std.SQLWebApp.url
-    ),
-    host_requirement=host,
-    container = unfurl_datatypes_DockerContainer(
+    environment=unfurl.datatypes.EnvironmentVariables(
+        DBASE=db.url, URL=std.SQLWebApp.url
+    ),
+    host_requirement=host,
+    container=unfurl_datatypes_DockerContainer(
         image="registry.gitlab.com/gitlab-org/project-templates/express/main:latest",
         ports=["5000:5000"],
-        deploy={"resources": {"limits": {"memory": Inputs.mem_size }}}
-    )
+        deploy={"resources": {"limits": {"memory": Inputs.mem_size}}},
+    ),
 )
 
-__root__ = std.SQLWebApp(
-    container=container,
-    db=db,
-    subdomain="myapp"
-)
+__root__ = std.SQLWebApp(container=container, db=db, subdomain="myapp")
Binary file added docs/images/quickstart-aws-plan.png
Binary file added docs/images/quickstart-k8s-deploy.png
8 changes: 5 additions & 3 deletions docs/jobs.rst
@@ -34,9 +34,11 @@ a job is created and run. Running a job entails these steps:

After the job finishes, a summary is printed showing the results of each operation along with any `outputs` defined in the model.

.. image:: images/job-summary.png
.. figure:: images/job-summary.png
   :align: center

   Example deploy job output

Generated Files
===============

@@ -120,13 +122,13 @@ Changes are detected by comparing a digest of the values of the inputs and properties
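
As a sketch of that idea (illustrative only -- not Unfurl's actual implementation, and the input names are hypothetical), change detection reduces to comparing a canonical digest of a task's inputs against the digest recorded by the last job:

.. code-block:: python

    import hashlib
    import json

    def digest(values: dict) -> str:
        # canonical JSON so that key order doesn't affect the digest
        encoded = json.dumps(values, sort_keys=True).encode()
        return hashlib.sha256(encoded).hexdigest()

    last_run = digest({"image": "myapp:1.0", "mem_size": "512 MB"})
    this_run = digest({"image": "myapp:1.1", "mem_size": "512 MB"})
    if this_run != last_run:
        print("inputs changed -- the task needs to re-run")
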
Undeploy (teardown)
=====================

The undeploy (:cli:`teardown<unfurl-teardown>`) workflow builds a plan where resources are deleted from the edges to the root, based on the topology's dependency graph -- essentially the reverse order of how it was deployed.
The undeploy workflow (invoked by the :cli:`unfurl teardown<unfurl-teardown>` command) builds a plan where resources are deleted from the edges to the root, based on the topology's dependency graph -- essentially the reverse order of how it was deployed.

A resource will be excluded from deletion if any of the following are true (the sketch after this list summarizes the logic):

* It was not created by the deployment (e.g. it was `discovered <Resource Discovery>`). This can be overridden by the :cli:`--destroyunmanaged<cmdoption-unfurl-deploy-destroyunmanaged>` job option. This is determined by the ``created`` field in the resource's status.

* It is managed by another resource. In that case,the name of the resource that manages it is the value of its ``created`` field. (And it is the responsibility of the managing resource's delete operation to also delete this resource).
* It is managed by another resource. In that case, the name of the resource that manages it is the value of its ``created`` field, and it is the responsibility of the managing resource's delete operation to also delete this resource.

* Its status has ``protected`` set to true.
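
The deletion ordering and these exclusion rules can be summarized in a short sketch (illustrative only -- not Unfurl's actual implementation; the status-field layout is an assumption based on the description above):

.. code-block:: python

    def undeploy_plan(deploy_order, status):
        """Sketch of planning an undeploy job.

        deploy_order: resource names in the order they were deployed
            (dependencies first).
        status: maps each resource name to its recorded status fields.
        """
        plan = []
        for name in reversed(deploy_order):  # from the edges back to the root
            fields = status.get(name, {})
            created = fields.get("created")
            if not created:
                continue  # not created by this deployment (e.g. discovered)
            if created is not True:
                continue  # names the resource that manages (and deletes) it
            if fields.get("protected"):
                continue  # explicitly protected from deletion
            plan.append(name)
        return plan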

83 changes: 53 additions & 30 deletions docs/quickstart.rst
@@ -22,17 +22,15 @@ If this is the first time you've created an Unfurl project, you'll notice a message
Step 2: Describe your application
---------------------------------

Now that your project is set up, let's deploy a web app. Let's consider a simple nodejs app that connects to a Postgres database.
Now that your project is set up, we'll create a TOSCA `blueprint<Step 2: Create a cloud blueprint>` for deploying our application. In this example, it's a container image of a simple nodejs web app that requires a connection to a Postgres database. Here are some of the things we need to do to deploy this web application:

Our web app is a container image.
* Create a service that can run the container image.
* Deploy a database and connect it to the web app.
* Configure DNS to resolve to the web app.

* We to create a service that can run the container image.
* Need to have access to a database and connect to the database.
* It also needs to hooked up to DNS.
We'll add the blueprint to the project's ``ensemble-template.yaml`` file so that it can be reused by different :doc:`ensembles`.

In ensemble-template.yaml we'll define our web app at a high-level.

The TOSCA specification defines types that provide basic abstractions for resources like compute instances and container images. In addition, we've developed the |stdlib|_.
The TOSCA specification defines types that provide basic abstractions for resources like compute instances and container images. In addition to TOSCA's built-in types, we'll use our |stdlib|_, so first we need to import that:

1. Open ``ensemble-template.yaml`` and uncomment these lines:

@@ -42,7 +40,7 @@ The TOSCA specification defines types that provide basic abstractions for resources
std:
  url: https://unfurl.cloud/onecommons/std.git
2. If you want to follow along using the Python examples, open ``service_template.py`` and uncomment these lines:
2. You can create your TOSCA blueprint in either YAML or Python. If you want to follow along using the Python examples, open ``service_template.py`` and uncomment these lines:

.. code-block:: python
@@ -55,8 +53,7 @@ This will make sure the changes you just made are valid but more importantly, as

4. Add the blueprint.

For our example, we'll use these types to model our application:
Add this to either service_template.py or ensemble-template.yaml:
Copy the code below to either service_template.py or ensemble-template.yaml. They are equivalent; in fact, you can `bi-directionally convert<usage>` them using the :cli:`unfurl export<unfurl-export>` command.

.. tab-set-code::

@@ -66,14 +63,16 @@
.. literalinclude:: ./examples/quickstart_service_template.yaml
   :language: yaml

Here we declare a few abstract resources: a service to run the container, a Postgres database, and a web application as the public root of the blueprint, along with some :std:ref:`inputs` to parameterize the blueprint. The parts of the blueprint that are not abstract are specific to our actual application: the container image we'll use and the environment variables it expects. In the next step we'll instantiate those abstract types with implementations appropriate for the environment we're deploying into.

Step 3. Instantiate your blueprint
----------------------------------

Now we have a model that we can customize for different environments.
Now that we have a model, we can customize it for different environments.
In this example, let's suppose there are two types of environments we want to deploy this into:

* a production environment that deploys to AWS and using AWS RDS database
* a development environments that runs the app and Postgres as services in a Kubernetes cluster.
* A production environment that deploys to AWS, installing the app on an EC2 compute instance and provisioning an AWS RDS database.
* A development environment that runs the app and Postgres as services on a local Kubernetes cluster.

Let's create those environments, along with a deployment for each:

@@ -89,9 +88,7 @@ The ``--skeleton`` option lets you specify an alternative to the default project

Store the master password found in ``ensemble/local/unfurl.yaml`` in a safe place! By default, this password is used to encrypt any sensitive data committed to the repository. See :doc:`secrets` for more information.

There are different approaches to customize a blueprint but simple one is to declare deployment blueprints. A `deployment blueprint` is a blueprint that is only applied when its criteria matches the deployment environment. It inherits from the global blueprint and includes node templates that override the blueprint's.

Ensemble's ``deployment_blueprints`` In Python, a `deployment blueprint` is represented as a Python class with the customized template objects as class attributes.
There are different approaches to customizing a blueprint for different environments, but a simple one is to declare deployment blueprints. A `deployment blueprint` is a blueprint that is only applied when its criteria match the deployment environment. It inherits from the service template's blueprint and includes node templates that override the blueprint's. In YAML, they are declared in the ``deployment_blueprints`` section of an ensemble. In Python, a `deployment blueprint` is represented as a Python class with the customized template objects as class attributes.
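
Conceptually, the override behaves like a mapping merge: node templates defined in a deployment blueprint shadow same-named templates in the base blueprint, and everything else is inherited unchanged. Here is a loose analogy in plain Python (hypothetical values, not code to add to your project):

.. code-block:: python

    base_blueprint = {
        "container": "std.ContainerService",
        "host": "std.ContainerComputeHost",
        "db": "std.PostgresDBInstance",
    }
    production = {"host": "std.HttpsProxyContainerComputeHost", "db": "AwsRdsPostgres"}

    # overrides win; "container" is inherited from the base blueprint
    effective = {**base_blueprint, **production}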

Add the following code below the code from the previous step:

@@ -105,40 +102,66 @@
.. literalinclude:: ./examples/quickstart_deployment_blueprints.yaml
   :language: yaml

Here, each deployment blueprint replaces the "host" and "db" node templates with subclasses of those abstract types that are specific to the cloud provider we want to deploy into.

If you look at those `implementations <https://unfurl.cloud/onecommons/std>`_, you'll see they invoke Terraform, Ansible, and Kompose. If we defined our own types instead of using these predefined ones, we'd have to implement `operations<Interfaces and Operations>` for deploying them. See the `Configurators` chapter to learn how to implement your own as `Ansible` playbooks, `Terraform` modules, or by invoking `shell` commands.

You can use the :cli:`unfurl plan<unfurl-plan>` command to review the scripts and artifacts Unfurl generates to deploy your ensemble. For example, if we run:

.. code-block:: shell

   unfurl plan production
Here we are using existing implementations defined in the std library -- to write your own, check out our examples for adding `Ansible` playbooks, `Terraform` modules or invoking `shell` commands.
You'll see something like this:

Now if we run :cli:`unfurl plan<unfurl-plan>`
.. figure:: images/quickstart-aws-plan.png
   :align: center

   ``unfurl plan production`` output

The plan's output includes the location of files that were generated ("rendered") while creating the plan, for example, a Terraform module to deploy the AWS RDS database -- see `generated files`.

Step 4. Deploy and manage
-------------------------

Now we're ready to deploy our application.
Run :cli:`unfurl deploy development<unfurl-deploy>` from the command line to deploy the development ensemble.
Now we're ready to deploy our application. Run :cli:`unfurl deploy development<unfurl-deploy>` from the command line to deploy the development ensemble. You can also use the ``--dryrun`` flag to simulate the deployment.

After the job finishes, a summary is printed showing the results of each operation:

.. figure:: images/quickstart-k8s-deploy.png
   :align: center

   ``unfurl deploy development`` output

* :cli:`unfurl commit<unfurl-commit>` It will commit to git the latest configuration and a history of changes to your cloud accounts. (Or you could have used the ``--commit`` flag with :cli:`unfurl depoy<unfurl-deploy>`)
🎉 Congratulations on your first deployment with Unfurl! 🎉

* You can ``unfurl serve --gui`` Or host your repositories to `Unfurl Cloud`.
Now that you've deployed your ensemble, here are some ways you can manage your deployment:

* If you make changes to your deployment will update it.
* Commit your changes with :cli:`unfurl commit<unfurl-commit>`. This will commit to git the latest configuration and the history of changes made by the deployment, encrypting any sensitive data. (Or use the ``--commit`` flag with the :cli:`unfurl deploy<unfurl-deploy>` command to do this automatically.)

* Delete it using :cli:`unfurl teardown<unfurl-teardown>`.
* Run `unfurl serve --gui<Browser-based Admin User Interface>` to view and manage your deployment. Or host your repositories on `Unfurl Cloud`_ for a full-fledged, multi-user admin experience.

* Run `Ad-hoc Jobs`.

* If you make changes to your deployment's configuration, re-running `unfurl deploy<Updating a deployment>` will update the existing deployment.

* Delete it using the `unfurl teardown<Undeploy (teardown)>` command.

Step 5. Share and Collaborate
-----------------------------

To share your blueprint and deployment, push your repository to a git hosting service such as GitHub or GitLab (or better yet, `Unfurl Cloud`_!). You just have to `configure git remotes<Publishing your project>` for the git repositories we created.

When we ran :cli:`unfurl init<unfurl-init>`, we relied on the default behavior of creating a separate git repository for each ensemble. This allows the project's blueprints and deployments to have separate histories and access control.

We can make the blueprint repository public but limit access to the production repository to system admins. In either case, you'd use the `unfurl clone<Cloning projects and ensembles>` command to clone the blueprint or the ensemble.
When we ran :cli:`unfurl init<unfurl-init>`, we relied on the default behavior of creating a separate git repository for each ensemble. This allows the project's blueprints and deployments to have separate histories and access control. This way we can make the blueprint repository public but limit access to the production repository to system admins. In either case, you'd use the `unfurl clone<Cloning projects and ensembles>` command to clone the blueprint or the ensemble.

If you want to create a new deployment from the blueprint, clone the blueprint repository, by default Unfurl will create a new ensemble using the blueprint unless the ``--empty`` flag is used.
If you want to create a new deployment from the blueprint, clone the blueprint repository -- by default, Unfurl will create a new ensemble using the blueprint unless the ``--empty`` flag is used.

If you want to manage one of the deployments we already deployed, clone the repository that has that ensemble.

.. tip::

If we had used the ``--submodule`` option with :cli:`unfurl init<unfurl-init>` (or manually added a submodule using ``git submodule add``), then the unfurl clone command would have cloned those ensembles too, as submodules.

Once multiple users are sharing your projects can start `exploring<step5>` the different ways you can collaborate together to develop and manage your blueprints and deployments.
Once multiple users are sharing your projects, start `exploring<step5>` the different ways you can collaborate to develop and manage your blueprints and deployments.
49 changes: 46 additions & 3 deletions tests/test_docs.py
@@ -1,13 +1,20 @@
import fnmatch
import pathlib
import shutil
import unittest
import os
import glob
from shutil import which
import pytest

from .test_dsl import _to_yaml
from toscaparser.tosca_template import ToscaTemplate
from unfurl.localenv import LocalConfig
from unfurl.yamlmanifest import YamlManifest, _basepath
from unfurl.yamlloader import YamlConfig
from unfurl.spec import ToscaSpec
from tosca import global_state
from unfurl.testing import CliRunner, run_cmd

basedir = os.path.join(os.path.dirname(__file__), "..", "docs", "examples")

@@ -20,9 +27,9 @@ def test_schemas(self):
            path=os.path.join(basedir, "job.yaml"),
            schema=os.path.join(_basepath, "changelog-schema.json"),
        )
        # path = os.path.join(basedir, "service-template.yaml")
        # serviceTemplate = YamlConfig(path=path)
        # assert ToscaSpec(serviceTemplate.config, path=path)
        path = os.path.join(basedir, "service-template.yaml")
        serviceTemplate = YamlConfig(path=path)
        assert ToscaSpec(serviceTemplate.config, path=path)

    def test_python_snippets(self):
        # examples generated like:
@@ -65,3 +72,39 @@ def test_python_example(self):
        from_py = _to_yaml(pyfile.read(), True)
        assert from_py["topology_template"]["outputs"] == yaml_template.topology_template._tpl_outputs()
        assert from_py["topology_template"]["inputs"] == yaml_template.topology_template._tpl_inputs()

ensemble_template = """
apiVersion: unfurl/v1alpha1
spec:
  service_template:
    +?include: service_template.py
    repositories:
      std:
        url: https://unfurl.cloud/onecommons/std.git
"""

@pytest.mark.skipif(
    "k8s" in os.getenv("UNFURL_TEST_SKIP", ""), reason="UNFURL_TEST_SKIP for k8s set"
)
# skip if we don't have kompose installed but require CI to have it
@pytest.mark.skipif(
    not os.getenv("CI") and not which("kompose"), reason="kompose command not found"
)
def test_quickstart():
    runner = CliRunner()
    with runner.isolated_filesystem("tmp"):
        run_cmd(runner, ["init", "myproject", "--empty"])
        os.chdir("myproject")
        with open("ensemble-template.yaml", "w") as f:
            f.write(ensemble_template)
        base = pathlib.Path(basedir)
        shutil.copy(base / "quickstart_service_template.py", "service_template.py")
        run_cmd(runner, "validate")
        run_cmd(runner, "init production --skeleton aws --use-environment production")
        run_cmd(runner, "init development --skeleton k8s --use-environment development")
        with open(base / "quickstart_deployment_blueprints.py") as src_file:
            deployment_blueprint = src_file.read()
        with open("service_template.py", "a") as f:
            f.write(deployment_blueprint)
        run_cmd(runner, "plan production")
        run_cmd(runner, "deploy --dryrun --approve development")
