Add Chronos jobs dependencies through 'parent_job' capability
See #34
lorenzo-biava committed May 23, 2016
1 parent 35fedba commit 66f2ca3
Showing 4 changed files with 177 additions and 64 deletions.
18 changes: 17 additions & 1 deletion src/main/java/it/reply/orchestrator/service/ToscaService.java
@@ -135,9 +135,25 @@ public ArchiveRoot prepareTemplate(@Nonnull String toscaTemplate, Map<String, Ob
public PropertyValue<?> getCapabilityPropertyValueByName(Capability capability,
String propertyName);

public RelationshipTemplate getRelationshipTemplateByCapabilityName(
public List<RelationshipTemplate> getRelationshipTemplatesByCapabilityName(
Map<String, RelationshipTemplate> relationships, String capabilityName);

/**
* Finds all the nodes associated with the given {@link NodeTemplate} through a capability with
* the given name.
*
* @param nodes
* the template's node map.
* @param nodeTemplate
* the origin node.
* @param capabilityName
* the name of the capability.
* @return a map of the nodes (keyed by node name) associated with the origin node through the
* given capability.
*/
public Map<String, NodeTemplate> getAssociatedNodesByCapability(Map<String, NodeTemplate> nodes,
NodeTemplate nodeTemplate, String capabilityName);

public Map<String, NodeTemplate> getCountNodes(ArchiveRoot archiveRoot);

public int getCount(NodeTemplate nodeTemplate);
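Usage note: a minimal sketch (not part of this commit) of how a caller could use the new getAssociatedNodesByCapability method to resolve the jobs linked through the 'parent_job' capability. The helper class and method names are hypothetical; the ToscaService instance and the topology node map are assumed to come from the parsed template.

import java.util.Map;

import alien4cloud.model.topology.NodeTemplate;
import it.reply.orchestrator.service.ToscaService;

public class ParentJobLookupSketch {

  // Returns the TOSCA nodes reachable from 'jobNode' through the 'parent_job'
  // capability, using the interface method added by this commit.
  public static Map<String, NodeTemplate> lookupParents(ToscaService toscaService,
      Map<String, NodeTemplate> topologyNodes, NodeTemplate jobNode) {
    return toscaService.getAssociatedNodesByCapability(topologyNodes, jobNode, "parent_job");
  }
}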
33 changes: 27 additions & 6 deletions src/main/java/it/reply/orchestrator/service/ToscaServiceImpl.java
@@ -371,16 +371,37 @@ public PropertyValue<?> getCapabilityPropertyValueByName(Capability capability,
}

@Override
public RelationshipTemplate getRelationshipTemplateByCapabilityName(
public Map<String, NodeTemplate> getAssociatedNodesByCapability(Map<String, NodeTemplate> nodes,
NodeTemplate nodeTemplate, String capabilityName) {
Map<String, NodeTemplate> associatedNodes = new HashMap<>();

List<RelationshipTemplate> relationships =
getRelationshipTemplatesByCapabilityName(nodeTemplate.getRelationships(), capabilityName);
if (!relationships.isEmpty()) {
for (RelationshipTemplate relationship : relationships) {
String associatedNodeName = relationship.getTarget();
associatedNodes.put(associatedNodeName, nodes.get(associatedNodeName));
}
}

return associatedNodes;
}

@Override
public List<RelationshipTemplate> getRelationshipTemplatesByCapabilityName(
Map<String, RelationshipTemplate> relationships, String capabilityName) {
if (relationships == null)
return null;

List<RelationshipTemplate> relationshipTemplates = new ArrayList<>();
if (relationships == null) {
return relationshipTemplates;
}

for (Map.Entry<String, RelationshipTemplate> relationship : relationships.entrySet()) {
if (relationship.getValue().getTargetedCapabilityName().equals(capabilityName))
return relationship.getValue();
if (relationship.getValue().getTargetedCapabilityName().equals(capabilityName)) {
relationshipTemplates.add(relationship.getValue());
}
}
return null;
return relationshipTemplates;
}

@Override
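For illustration, a small self-contained sketch (not part of this commit; the helper class name is hypothetical) of the changed contract: every relationship targeting the requested capability is now returned, and a null relationship map yields an empty list instead of null.

import java.util.List;
import java.util.Map;

import alien4cloud.model.topology.RelationshipTemplate;
import it.reply.orchestrator.service.ToscaService;

public class RelationshipLookupSketch {

  public static void printParentTargets(ToscaService toscaService,
      Map<String, RelationshipTemplate> relationships) {
    // All relationships bound to the 'parent_job' capability are returned,
    // so a job may declare more than one parent.
    List<RelationshipTemplate> parents =
        toscaService.getRelationshipTemplatesByCapabilityName(relationships, "parent_job");
    for (RelationshipTemplate relationship : parents) {
      System.out.println(relationship.getTarget());
    }
    // A null map no longer returns null: this prints 'true'.
    System.out.println(
        toscaService.getRelationshipTemplatesByCapabilityName(null, "parent_job").isEmpty());
  }
}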
@@ -1,14 +1,12 @@
package it.reply.orchestrator.service.deployment.providers;

import alien4cloud.model.components.AbstractPropertyValue;
import alien4cloud.model.components.ComplexPropertyValue;
import alien4cloud.model.components.DeploymentArtifact;
import alien4cloud.model.components.ListPropertyValue;
import alien4cloud.model.components.PropertyValue;
import alien4cloud.model.components.ScalarPropertyValue;
import alien4cloud.model.topology.Capability;
import alien4cloud.model.topology.NodeTemplate;
import alien4cloud.model.topology.RelationshipTemplate;
import alien4cloud.tosca.model.ArchiveRoot;
import alien4cloud.tosca.normative.SizeType;
import alien4cloud.tosca.parser.ParsingException;
@@ -487,14 +485,20 @@ public enum JobDependencyType {
}

private Job chronosJob;
private String toscaNodeName;
private Collection<IndigoJob> children = new ArrayList<>();
private Collection<IndigoJob> parents = new ArrayList<>();

public IndigoJob(Job chronosJob) {
public IndigoJob(String toscaNodeName, Job chronosJob) {
super();
this.toscaNodeName = toscaNodeName;
this.chronosJob = chronosJob;
}

public String getToscaNodeName() {
return toscaNodeName;
}

public Job getChronosJob() {
return chronosJob;
}
@@ -509,7 +513,7 @@ public Collection<IndigoJob> getParents() {

@Override
public String toString() {
return "IndigoJob [chronosJob=" + chronosJob.getName();
return "IndigoJob [toscaNodeName=" + toscaNodeName + ", chronosJob=" + chronosJob.getName();
}

}
@@ -556,8 +560,8 @@ protected Multimap<JobDependencyType, IndigoJob> generateJobGraph(Deployment dep
String nodeName = node.getKey();
if (isChronosNode(nodeTemplate)) {
Job chronosJob = createJob(nodes, deploymentId, nodeName, nodeTemplate);

IndigoJob job = new IndigoJob(chronosJob);
;
IndigoJob job = new IndigoJob(nodeName, chronosJob);
jobs.put(nodeName, job);
}
}
@@ -683,11 +687,17 @@ protected boolean isChronosNode(NodeTemplate nodeTemplate) {

protected List<String> getJobParents(NodeTemplate nodeTemplate, String nodeName,
Map<String, NodeTemplate> nodes) {
// FIXME Implement parent extraction
// Requirement parentNode = nodeTemplate.getRequirements().get("job_predecessor");
// Get Chronos parent job dependency
String parentJobCapabilityName = "parent_job";
Map<String, NodeTemplate> parentJobs =
toscaService.getAssociatedNodesByCapability(nodes, nodeTemplate, parentJobCapabilityName);

// STUB !
return nodeName.equals("chronos_job_upload") ? Lists.newArrayList("chronos_job") : null;
if (parentJobs.isEmpty()) {
return null;
} else {
// WARNING: cycle check is done later!
return Lists.newArrayList(parentJobs.keySet());
}
}

protected Job createJob(Map<String, NodeTemplate> nodes, String deploymentId, String nodeName,
@@ -759,14 +769,16 @@ protected Job createJob(Map<String, NodeTemplate> nodes, String deploymentId, St

// Get Docker host dependency
String dockerCapabilityName = "host";
RelationshipTemplate dockerRelationship =
toscaService.getRelationshipTemplateByCapabilityName(nodeTemplate.getRelationships(),
dockerCapabilityName);
Map<String, NodeTemplate> dockerRelationships =
toscaService.getAssociatedNodesByCapability(nodes, nodeTemplate, dockerCapabilityName);
Double dockerNumCpus = null;
Double dockerMemSize = null;
if (dockerRelationship != null) {
String dockerNodeName = dockerRelationship.getTarget();
NodeTemplate dockerNode = nodes.get(dockerNodeName);
if (!dockerRelationships.isEmpty()) {
/*
* WARNING: The TOSCA validation should already check the limits (currently Alien4Cloud does
* not...)
*/
NodeTemplate dockerNode = dockerRelationships.values().iterator().next();
Capability dockerCapability = dockerNode.getCapabilities().get(dockerCapabilityName);
dockerNumCpus = Double.parseDouble((String) toscaService
.getCapabilityPropertyValueByName(dockerCapability, "num_cpus").getValue());
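To make the dependency wiring concrete, a simplified standalone sketch (plain strings instead of IndigoJob and Chronos Job objects; not the orchestrator's code) of how the parent lists returned by getJobParents can be inverted into the parent-to-children graph that generateJobGraph builds. The node names match the sample template below.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class JobGraphSketch {

  public static void main(String[] args) {
    // Parent lists as getJobParents would return them for the sample template:
    // 'chronos_job_upload' declares 'chronos_job' as its parent via 'parent_job'.
    Map<String, List<String>> parentsByJob = new HashMap<>();
    parentsByJob.put("chronos_job", null);
    parentsByJob.put("chronos_job_upload", Arrays.asList("chronos_job"));

    // Invert the parent lists into a parent -> children map; jobs with no
    // parents are the roots that keep a real Chronos schedule.
    Map<String, List<String>> childrenByJob = new HashMap<>();
    for (Map.Entry<String, List<String>> entry : parentsByJob.entrySet()) {
      if (entry.getValue() == null) {
        continue; // root job, no parent edge to record
      }
      for (String parent : entry.getValue()) {
        List<String> children = childrenByJob.get(parent);
        if (children == null) {
          children = new ArrayList<>();
          childrenByJob.put(parent, children);
        }
        children.add(entry.getKey());
      }
    }

    System.out.println(childrenByJob); // {chronos_job=[chronos_job_upload]}
  }
}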
146 changes: 105 additions & 41 deletions src/test/resources/tosca/chronos_tosca_minimal.yaml
@@ -4,72 +4,136 @@ imports:
- indigo_custom_types: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/custom_types.yaml

description: >
TOSCA examples for specifying Chronos and Marathon jobs to enable the
specification of applications and services in INDIGO.
TOSCA examples for specifying a Chronos Job that runs an application using the input stored at some URL and uploads the output data to an http(s) or ftp(s) or webdav(s) repository
topology_template:

inputs:
command:
input_urls:
type: list
description: List of input files that will be downloaded in the job sandbox (archives will be automatically uncompressed)
entry_schema:
type: string
required: yes

output_protocol:
type: string
description: Protocol that will be used to upload the output generated by the job
required: false
default: 'echo \"command not replaced\"'
constraints:
- valid_values: [ http, https, webdav, webdavs, ftp, ftps ]
required: yes

node_templates:
output_endpoint:
type: string
description: 'Endpoint to be used to upload the output generated by the job. Example: http://myhttpserver:8080/'
required: yes

output_path:
type: string
description: 'Path in the repository where the output data will be uploaded (to be appended to the endpoint string, e.g. http://myhttpserver:8080/output_path/)'
required: yes

output_username:
type: string
description: Username used to access the URL where the output files will be stored
required: yes

output_password:
type: string
description: Password used to access the URL where the output files will be stored
required: yes

output_filenames:
type: list
description: List of filenames generated by the application run
entry_schema:
type: string
required: yes

command:
type: string
description: Command to execute
default: 'env'
required: no

cpus:
type: float
description: Amount of CPUs for this job
required: yes

mem:
type: scalar-unit.size
description: Amount of Memory for this job
required: yes

docker_image:
type: string
description: Docker image to be used to run the container application
required: yes

node_templates:
chronos_job:
type: tosca.nodes.indigo.Container.Application.Docker.Chronos
properties:
schedule: 'R0//PT2S'
description: 'Execute app'
#command: /bin/bash env #fails!
command: env
uris: ['']
schedule: 'R0/2015-12-25T17:22:00Z/PT1M'
description: 'Execute Application'
command: { get_input: command }
uris: { get_input: input_urls}
retries: 3
environment_variables:
#OUTPUT_FILENAMES: { get_input: output_filenames }
OUTPUT_FILENAMES: { get_input: output_filenames }
ONEDATA_SERVICE_TOKEN: 'TOKEN_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_SPACE: 'DATA_SPACE_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_PATH: 'PATH_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_PROVIDERS: 'ONEDATA_PROVIDERS_TO_BE_SET_BY_THE_ORCHESTRATOR'
artifacts:
image:
file: libmesos/ubuntu
file: { get_input: docker_image }
type: tosca.artifacts.Deployment.Image.Container.Docker
requirements:
- host: docker_runtime1


chronos_job_upload:
type: tosca.nodes.indigo.Container.Application.Docker.Chronos
properties:
description: 'Upload output data'
command: 'echo \"I will upload something...\"'
retries: 3
environment_variables:
OUTPUT_PROTOCOL: { get_input: output_protocol }
OUTPUT_ENDPOINT: { get_input: output_endpoint }
OUTPUT_PATH: { get_input: output_path }
OUTPUT_USERNAME: { get_input: output_username }
OUTPUT_PASSWORD: { get_input: output_password }
OUTPUT_FILENAMES: { get_input: output_filenames }
ONEDATA_SERVICE_TOKEN: 'TOKEN_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_SPACE: 'DATA_SPACE_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_PATH: 'PATH_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_PROVIDERS: 'ONEDATA_PROVIDERS_TO_BE_SET_BY_THE_ORCHESTRATOR'
artifacts:
image:
#file: indigodatacloud/jobuploader # NOT EXISTS YET!
file: libmesos/ubuntu
type: tosca.artifacts.Deployment.Image.Container.Docker
requirements:
- host: docker_runtime2
- parent_job: chronos_job


docker_runtime1:
type: tosca.nodes.indigo.Container.Runtime.Docker
capabilities:
host:
properties:
num_cpus: 0.5
mem_size: 512 MB
num_cpus: { get_input: cpus }
mem_size: { get_input: mem }

chronos_job_upload:
type: tosca.nodes.indigo.Container.Application.Docker.Chronos
properties:
description: 'Upload output data'
#command: 'echo \"I will upload something...\"'
command: { get_input: command }
retries: 3
environment_variables:
#OUTPUT_PROTOCOL: { get_input: output_protocol }
#OUTPUT_ENDPOINT: { get_input: output_endpoint }
#OUTPUT_PATH: { get_input: output_path }
#OUTPUT_USERNAME: { get_input: output_username }
#OUTPUT_PASSWORD: { get_input: output_password }
#OUTPUT_TENANT: { get_input: output_tenant }
#OUTPUT_REGION: { get_input: output_region }
#OUTPUT_FILENAMES: { get_input: output_filenames }
ONEDATA_SERVICE_TOKEN: 'TOKEN_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_SPACE: 'DATA_SPACE_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_PATH: 'PATH_TO_BE_SET_BY_THE_ORCHESTRATOR'
ONEDATA_PROVIDERS: 'ONEDATA_PROVIDERS_TO_BE_SET_BY_THE_ORCHESTRATOR'
artifacts:
image:
#file: indigodatacloud/jobuploader # NOT EXISTS YET!
file: libmesos/ubuntu
type: tosca.artifacts.Deployment.Image.Container.Docker

docker_runtime2:
type: tosca.nodes.indigo.Container.Runtime.Docker
capabilities:
host:
properties:
num_cpus: 1.0
mem_size: 1024 MB
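For reference, a rough sketch (an assumption about the target format, not output produced by this commit) of the Chronos job definitions the two node templates above are expected to map to: the root job keeps its time-based schedule, while the dependent job carries a 'parents' list instead, which is how Chronos expresses job dependencies. Job names here reuse the TOSCA node names for readability; the orchestrator derives the real names from the deployment.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

public class ChronosPayloadSketch {

  public static void main(String[] args) {
    // Scheduled (root) job: corresponds to the 'chronos_job' node template.
    Map<String, Object> rootJob = new LinkedHashMap<>();
    rootJob.put("name", "chronos_job");
    rootJob.put("schedule", "R0/2015-12-25T17:22:00Z/PT1M");
    rootJob.put("retries", 3);

    // Dependent job: corresponds to 'chronos_job_upload'; instead of a
    // schedule it lists its parents, mirroring the 'parent_job' requirement.
    Map<String, Object> dependentJob = new LinkedHashMap<>();
    dependentJob.put("name", "chronos_job_upload");
    dependentJob.put("parents", Arrays.asList("chronos_job"));
    dependentJob.put("retries", 3);

    System.out.println(rootJob);
    System.out.println(dependentJob);
  }
}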
