Skip to content

Commit

Permalink
feat: initial work on cloud provisioning
Browse files Browse the repository at this point in the history
Co-authored-by: Cappy Ishihara <[email protected]>
  • Loading branch information
lleyton and korewaChino committed Nov 28, 2023
1 parent 24e0252 commit b29fba1
Show file tree
Hide file tree
Showing 13 changed files with 549 additions and 22 deletions.
316 changes: 312 additions & 4 deletions Cargo.lock

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,10 @@ thiserror = "1.0"
tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] }
tracing-logfmt = "0.3.2"
uuid = "1.6"
digitalocean-rs = { version = "0.1.9", default-features = false, features = ["default-rustls"] }
rand = { version = "0.8.5", features = ["log", "serde"] }
async-trait = "0.1.74"
names = "0.14.0"
# opentelemetry = { version = "0.18.0", features = ["trace", "rt-tokio"] }
# opentelemetry-otlp = { version = "0.11.0", features = ["tokio"] }
# tonic = { version = "0.8.3" }
Expand Down
Empty file added src/cloud/aws.rs
Empty file.
41 changes: 41 additions & 0 deletions src/cloud/cloud_init.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
/// Renders the cloud-init user-data document that turns a fresh VM into a
/// chisel reverse-tunnel server.
///
/// The generated config:
/// * downloads and installs the `chisel` binary via jpillora's install script,
/// * writes a systemd unit that runs `chisel server --port=9090 --reverse`,
/// * writes `/etc/sysconfig/chisel` containing the `AUTH=chisel:<password>` pair.
///
/// Returns a `#cloud-config` document: the header line followed by the JSON
/// body (cloud-init accepts JSON since it is a subset of YAML).
pub fn generate_cloud_init_config(password: &str) -> String {
    // systemd unit written verbatim onto the VM by cloud-init's write_files.
    let service_unit = r#"
[Unit]
Description=Chisel Tunnel
Wants=network-online.target
After=network-online.target
StartLimitIntervalSec=0
[Install]
WantedBy=multi-user.target
[Service]
Restart=always
RestartSec=1
User=root
# You can add any additional flags here
# This example uses port 9090 for the tunnel socket. `--reverse` is required for our use case.
ExecStart=/usr/local/bin/chisel server --port=9090 --reverse
# Additional .env file for auth and secrets
EnvironmentFile=-/etc/sysconfig/chisel
"#;

    // Environment file consumed by the unit above; carries the tunnel credentials.
    let auth_env = format!("AUTH=chisel:{}\n", password);

    let cloud_config = serde_json::json!({
        "runcmd": ["curl https://i.jpillora.com/chisel! | bash", "systemctl enable --now chisel"],
        "write_files": [
            {
                "path": "/etc/systemd/system/chisel.service",
                "content": service_unit
            },
            {
                "path": "/etc/sysconfig/chisel",
                "content": auth_env
            }
        ]
    });

    format!("#cloud-config\n{}", cloud_config)
}

/// Smoke test: the rendered document must embed the chisel auth pair.
#[test]
fn test_generate_cloud_init_config() {
    let rendered = generate_cloud_init_config("test");
    println!("{}", rendered);
    assert!(rendered.contains("chisel:test"));
}
64 changes: 64 additions & 0 deletions src/cloud/digitalocean.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
use super::{
cloud_init::generate_cloud_init_config, pwgen::generate_password, CloudExitNode, Provisioner,
};
use async_trait::async_trait;
use digitalocean_rs::{DigitalOceanApi, DigitalOceanError};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

// Settings for provisioning exit-node droplets on DigitalOcean; deserialized
// from the ExitNodeProvisioner CRD (see src/ops.rs).
// NOTE(review): `region` is never applied in `create_exit_node` — the droplet
// is created without a region setter. Wire it through or drop the field.
// NOTE(review): the doc on `auth` says "reference to a secret", but
// `create_exit_node` passes `self.auth` straight to `DigitalOceanApi::new`
// as the API token — resolve the secret first or fix the doc.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
pub struct DigitalOceanProvisioner {
    /// Region ID of the DigitalOcean datacenter to provision the exit node in
    pub region: String,
    /// Reference to a secret containing the DigitalOcean API token, under the token key
    pub auth: String,
}

// Droplet size slug: the smallest standard droplet (1 vCPU / 1 GB) — a cheap
// default for a tunnel endpoint.
const DROPLET_SIZE: &str = "s-1vcpu-1gb";
// Base image slug. NOTE(review): Ubuntu 23.04 is an interim (9-month) release;
// consider an LTS image so provisioned nodes keep receiving updates.
const DROPLET_IMAGE: &str = "ubuntu-23-04-x64";

// each provider must support create, update, delete operations

// No inherent methods yet; kept as an anchor for future droplet helpers.
impl DigitalOceanProvisioner {}

#[async_trait]
impl Provisioner for DigitalOceanProvisioner {
    /// Provisions a new droplet running a chisel server and returns its
    /// connection details (name, IP, generated tunnel password).
    ///
    /// # Errors
    ///
    /// Returns an error if the DigitalOcean API call fails or if the created
    /// droplet reports no IPv4 network.
    async fn create_exit_node(&self) -> color_eyre::Result<CloudExitNode> {
        // Random per-node credential; baked into the droplet via cloud-init.
        let password = generate_password(32);
        let config = generate_cloud_init_config(&password);

        let api: DigitalOceanApi = DigitalOceanApi::new(self.auth.clone());

        let name = crate::cloud::generate_name();

        // SECURITY: the original code injected a hard-coded SSH key
        // fingerprint (commented "backdoor ;)") into every droplet, granting
        // a third party root access to user machines. Removed. If operator
        // SSH access is needed, make the key list a configurable field on
        // this provisioner instead of a baked-in constant.
        // NOTE(review): `self.region` is not applied here — check whether the
        // droplet builder exposes a region setter and use it.
        let droplet = api
            .create_droplet(name, DROPLET_SIZE, DROPLET_IMAGE)
            .user_data(&config)
            .run_async()
            .await?;

        // Don't index blindly: a droplet with no IPv4 network would panic on
        // `v4[0]`; surface it as an error instead.
        let ip = droplet
            .networks
            .v4
            .first()
            .map(|net| net.ip_address.clone())
            .ok_or_else(|| color_eyre::eyre::eyre!("droplet has no IPv4 network"))?;

        let exit_node = CloudExitNode {
            provider: crate::cloud::CloudProvider::DigitalOcean,
            name: droplet.name,
            ip,
            password,
        };

        Ok(exit_node)
    }

    /// Not implemented yet: reconcile an existing droplet with desired state.
    async fn update_exit_node(
        &self,
        _exit_node: CloudExitNode,
    ) -> color_eyre::Result<CloudExitNode> {
        todo!()
        // Ok(exit_node)
    }

    /// Not implemented yet: tear down the droplet backing this exit node.
    async fn delete_exit_node(&self, _exit_node: CloudExitNode) -> color_eyre::Result<()> {
        todo!()
    }
}
Empty file added src/cloud/linode.rs
Empty file.
53 changes: 53 additions & 0 deletions src/cloud/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
use async_trait::async_trait;
use digitalocean_rs::DigitalOceanApi;
use digitalocean_rs::DigitalOceanError;
use names::Generator;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::env;

/// Simple wrapper for names crate.
///
/// Produces a random human-readable name for newly provisioned exit nodes.
pub fn generate_name() -> String {
    // The names generator is expected to always yield a value, so the unwrap
    // should not fire in practice — TODO confirm against the crate's docs.
    Generator::default().next().unwrap()
}

mod cloud_init;
pub mod digitalocean;
mod pwgen;

// Cloud vendors an exit node can be provisioned on.
// (Linode and AWS are declared but have no provisioner implementations yet —
// src/cloud/linode.rs and src/cloud/aws.rs are empty files in this commit.)
// Plain `//` comments on purpose: `///` would leak into the JsonSchema output.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
pub enum CloudProvider {
    DigitalOcean,
    Linode,
    AWS,
}
/// A provisioned exit-node VM, as returned by a [`Provisioner`].
pub struct CloudExitNode {
    /// Which cloud the node was created on.
    pub provider: CloudProvider,
    /// VM name (randomly generated via `generate_name`).
    pub name: String,
    /// Password half of the chisel `chisel:<password>` auth pair baked into
    /// the VM by the cloud-init config.
    pub password: String,
    /// IP address of the node (first IPv4 address reported by the provider).
    pub ip: String,
}

/// Common interface each cloud provider integration must implement:
/// create, update and delete operations for exit nodes.
#[async_trait]
pub trait Provisioner {
    /// Provision a new exit node and return its connection details.
    async fn create_exit_node(&self) -> color_eyre::Result<CloudExitNode>;
    /// Reconcile an existing exit node with desired state
    /// (currently `todo!()` in every implementation).
    async fn update_exit_node(&self, exit_node: CloudExitNode)
        -> color_eyre::Result<CloudExitNode>;
    /// Tear down the given exit node.
    async fn delete_exit_node(&self, exit_node: CloudExitNode) -> color_eyre::Result<()>;
}

// Each LB service binds to an exit node, in a many-to-one relationship.
// An LB can annotate a specific exit node to bind to, or it can specify a provider to automatically provision a new exit node.
// If neither a specific exit node nor a provider is specified, the first exit node returned by the K8s API will be used;
// but if a provider is specified, a new exit node will be provisioned on that provider.
// A provisioner can have many exit nodes that it manages.
// Each exit node can be managed manually or automatically by a provisioner.
// You can request a new exit node from a provisioner by simply creating an LB service without specifying a specific exit node,
// or you can create a new managed exit node yourself.

// Take LB1 which has annotation chisel-operator.io/cloud-provisioner: do
// Take LB2 which has annotation chisel-operator.io/cloud-provisioner: do ON A DIFFERENT PORT
// what if I want to use the same exit node for both LB1 and LB2?
// maybe we can introduce a new annotation chisel-operator.io/cloud-exit-node: <name>
// if two LBs have the same cloud-exit-node annotation, then they will use the same exit node, WHEN THE PROVISIONER IS THE SAME
28 changes: 28 additions & 0 deletions src/cloud/pwgen.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
use rand::Rng;

// Username half of the chisel AUTH pair.
// NOTE(review): unused in this module — cloud_init.rs hard-codes "chisel" in
// its format string. Wire this constant through or remove it.
const USERNAME: &str = "chisel";

// Alphabet passwords are sampled from: ASCII letters, digits, and a small set
// of punctuation. The trailing `\` continuations strip the newline and the
// following leading whitespace, so this is a single contiguous byte string.
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789)(*&^%#@!~";
/// Generates a random password of the specified length.
///
/// Each character is drawn uniformly from [`CHARSET`] using the thread-local
/// RNG.
///
/// # Arguments
///
/// * `length` - The length of the password to generate.
///
/// # Returns
///
/// A randomly generated password as a `String`.
pub fn generate_password(length: usize) -> String {
    let mut rng = rand::thread_rng();

    // Every CHARSET byte is printable ASCII, so the byte→char cast is lossless.
    let mut password = String::with_capacity(length);
    for _ in 0..length {
        let index = rng.gen_range(0..CHARSET.len());
        password.push(char::from(CHARSET[index]));
    }

    password
}
7 changes: 6 additions & 1 deletion src/crdgen.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,12 @@
use kube::CustomResourceExt;

mod cloud;
mod ops;

fn main() {
    // Emit every CRD this operator serves; the combined output is intended to
    // be redirected into a manifest file and applied with kubectl.
    // (This range contained diff residue: the pre-change statement was left
    // alongside its replacement — only the final version belongs here.)
    print!("{}", serde_yaml::to_string(&ops::ExitNode::crd()).unwrap());
    // NOTE(review): if the serde_yaml version in use does not prefix each
    // document with `---`, the concatenation of two CRDs is not a valid
    // multi-document YAML stream — confirm, and insert a `---` separator
    // between the documents if needed.
    print!(
        "{}",
        serde_yaml::to_string(&ops::ExitNodeProvisioner::crd()).unwrap()
    )
}
3 changes: 1 addition & 2 deletions src/daemon.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,7 @@ async fn reconcile(obj: Arc<Service>, ctx: Arc<Context>) -> Result<Action, Recon
.as_ref()
.filter(|spec| {
spec.load_balancer_class.is_none()
|| spec.load_balancer_class
== Some(OPERATOR_CLASS.to_string())
|| spec.load_balancer_class == Some(OPERATOR_CLASS.to_string())
})
.is_none()
{
Expand Down
23 changes: 10 additions & 13 deletions src/deployment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use k8s_openapi::{
},
apimachinery::pkg::apis::meta::v1::LabelSelector,
};
use kube::{api::ResourceExt, core::ObjectMeta, Resource, error::ErrorResponse};
use kube::{api::ResourceExt, core::ObjectMeta, error::ErrorResponse, Resource};
use tracing::{debug, info};

/// The function takes a ServicePort struct and returns a string representation of the port number and
Expand Down Expand Up @@ -83,7 +83,6 @@ pub fn generate_tunnel_args(svc: &Service) -> Result<Vec<String>, ReconcileError
// We can unwrap safely since Service is namespaced scoped
let service_namespace = svc.namespace().unwrap();


// this feels kind of janky, will need to refactor this later

// check if there's a custom IP set
Expand Down Expand Up @@ -229,16 +228,14 @@ pub fn create_owned_deployment(
source: &Service,
exit_node: &ExitNode,
) -> Result<Deployment, ReconcileError> {
let oref = exit_node.controller_owner_ref(&()).ok_or_else(
|| {
ReconcileError::KubeError(kube::Error::Api(ErrorResponse {
code: 500,
message: "ExitNode is missing owner reference".to_string(),
reason: "MissingOwnerReference".to_string(),
status: "Failure".to_string(),
}))
},
)?;
let oref = exit_node.controller_owner_ref(&()).ok_or_else(|| {
ReconcileError::KubeError(kube::Error::Api(ErrorResponse {
code: 500,
message: "ExitNode is missing owner reference".to_string(),
reason: "MissingOwnerReference".to_string(),
status: "Failure".to_string(),
}))
})?;
let service_name = source.metadata.name.as_ref().ok_or_else(|| {
ReconcileError::KubeError(kube::Error::Api(ErrorResponse {
code: 500,
Expand Down Expand Up @@ -318,4 +315,4 @@ mod tests {
assert_eq!(owner_ref.name, "");
assert_eq!(owner_ref.uid, uuid::Uuid::nil().to_string());
}
}
}
1 change: 1 addition & 0 deletions src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ use color_eyre::Result;
// use tracing::info;
use tracing_subscriber::{prelude::*, EnvFilter, Registry};
// Main entrypoint for operator
mod cloud;
mod daemon;
mod deployment;
mod error;
Expand Down
31 changes: 29 additions & 2 deletions src/ops.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use crate::cloud::digitalocean::DigitalOceanProvisioner;
use crate::cloud::CloudProvider;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
Expand Down Expand Up @@ -33,7 +35,6 @@ pub struct ExitNodeSpec {
pub default_route: bool,
}


impl ExitNodeSpec {
/// Returns the external host if it exists, otherwise returns the host
// jokes on you, This is actually used in the reconcile loop.
Expand All @@ -45,4 +46,30 @@ impl ExitNodeSpec {
None => self.host.clone(),
}
}
}
}

// Placeholder settings for a Linode-backed provisioner.
// NOTE(review): no `impl Provisioner` exists for this yet (src/cloud/linode.rs
// is an empty file in this commit); `auth` is presumably the Linode API token
// or a secret reference — confirm before implementing.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
pub struct LinodeProvisioner {
    // Credential used to talk to the Linode API — exact semantics TBD.
    pub auth: String,
}

// Placeholder settings for an AWS-backed provisioner.
// NOTE(review): no `impl Provisioner` exists for this yet (src/cloud/aws.rs is
// an empty file in this commit); `auth` is presumably an AWS credential or a
// secret reference — confirm before implementing.
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
pub struct AWSProvisioner {
    // Credential used to talk to the AWS API — exact semantics TBD.
    pub auth: String,
}

// Tagged union selecting which cloud backend a provisioner uses; each variant
// carries that backend's settings struct.
// NOTE(review): confirm the pinned kube version supports `CustomResource` on
// an enum spec (historically the derive targeted structs), and that
// `struct = "ExitNodeProvisioner"` is the intended attribute name.
#[derive(Serialize, Deserialize, Debug, CustomResource, Clone, JsonSchema)]
#[kube(
    group = "chisel-operator.io",
    version = "v1",
    kind = "ExitNodeProvisioner",
    singular = "exitnodeprovisioner",
    struct = "ExitNodeProvisioner",
    namespaced
)]
/// ExitNodeProvisioner is a custom resource that represents a Chisel exit node provisioner on a cloud provider.
pub enum ExitNodeProvisionerSpec {
    DigitalOcean(DigitalOceanProvisioner),
    Linode(LinodeProvisioner),
    AWS(AWSProvisioner),
}

0 comments on commit b29fba1

Please sign in to comment.