From cfb9d1bb953cfd558ce713b49973aa4ddcaba66d Mon Sep 17 00:00:00 2001 From: SamyOubouaziz Date: Mon, 29 Jul 2024 15:18:43 +0200 Subject: [PATCH] fix(gen): remove backslashes on .html extensions --- .../dedibox/how-to/connect-to-dedibox.mdx | 2 +- bare-metal/dedibox/quickstart.mdx | 2 +- .../elastic-metal/how-to/connect-to-server.mdx | 2 +- .../elastic-metal-rv1-guidelines.mdx | 2 +- ...rnetes-added-new-kubernetes-operator-to.mdx | 2 +- compute/gpu/how-to/use-mig-with-kubernetes.mdx | 2 +- compute/gpu/how-to/use-preinstalled-env.mdx | 2 +- .../choosing-gpu-instance-type.mdx | 2 +- .../kubernetes-gpu-time-slicing.mdx | 2 +- .../understanding-nvidia-fp8.mdx | 2 +- .../understanding-nvidia-nemo.mdx | 2 +- .../understanding-nvidia-ngc.mdx | 2 +- .../install-nvidia-drivers-ubuntu.mdx | 4 ++-- compute/instances/api-cli/using-cloud-init.mdx | 2 +- .../instances/how-to/connect-to-instance.mdx | 2 +- compute/instances/quickstart.mdx | 2 +- .../preventing-outgoing-ddos.mdx | 8 ++++---- ...ix-dns-routed-ipv6-only-debian-bullseye.mdx | 2 +- .../api-cli/retrieve-monthly-consumption.mdx | 2 +- console/billing/how-to/add-payment-method.mdx | 2 +- .../kubernetes/api-cli/managing-storage.mdx | 2 +- .../how-to/use-nvidia-gpu-operator.mdx | 2 +- .../how-to/configure-htaccess-file.mdx | 2 +- .../cpanel-hosting/reference-content.mdx | 18 +++++++++--------- .../how-to/configure-network-netplan.mdx | 2 +- .../rpn/how-to/configure-jumboframes.mdx | 2 +- .../how-to/connect-to-server.mdx | 2 +- dedibox/dedicated-servers/quickstart.mdx | 2 +- dedibox/kvm-over-ip/how-to/dell-idrac6.mdx | 2 +- dedibox/kvm-over-ip/how-to/dell-idrac7.mdx | 2 +- dedibox/kvm-over-ip/how-to/dell-idrac8.mdx | 2 +- dedibox/kvm-over-ip/how-to/dell-idrac9.mdx | 2 +- dedibox/kvm-over-ip/how-to/hp-ilo.mdx | 4 ++-- dedibox/kvm-over-ip/how-to/quanta-computer.mdx | 2 +- dedibox/kvm-over-ip/how-to/supermicro.mdx | 2 +- dedibox/vps/how-to/connect-vps.mdx | 2 +- dedibox/vps/quickstart.mdx | 2 +- faq/instances.mdx | 2 +- 
.../how-to/create-ssh-key.mdx | 4 ++-- .../organizations-and-projects/quickstart.mdx | 4 ++-- .../how-to/understand-event-messages.mdx | 6 +++--- .../api-cli/send-emails-with-api.mdx | 2 +- .../cpanel-reference-content.mdx | 18 +++++++++--------- observability/cockpit/concepts.mdx | 2 +- .../how-to/send-metrics-logs-to-cockpit.mdx | 2 +- .../resetting-grafana-password-via-the-api.mdx | 2 +- .../api-cli/deploy-container-api.mdx | 2 +- .../configure-trigger-inputs.mdx | 2 +- .../functions/api-cli/deploy-function-api.mdx | 2 +- .../reference-content/code-examples.mdx | 4 ++-- .../configure-trigger-inputs.mdx | 2 +- .../messaging/api-cli/connect-aws-cli.mdx | 2 +- .../messaging/api-cli/python-node-sns.mdx | 10 +++++----- .../messaging/api-cli/python-node-sqs.mdx | 4 ++-- .../reference-content/sns-overview.mdx | 2 +- .../reference-content/sqs-overview.mdx | 2 +- ...import-data-to-serverless-sql-databases.mdx | 8 ++++---- .../api-cli/secure-connection-ssl-tls.mdx | 8 ++++---- .../sql-databases/how-to/manage-backups.mdx | 2 +- serverless/sql-databases/quickstart.mdx | 2 +- .../serverless-sql-databases-overview.mdx | 2 +- .../maximum-prepared-statements-reached.mdx | 6 +++--- storage/object/api-cli/enable-sse-c.mdx | 2 +- .../api-cli/generate-aws4-auth-signature.mdx | 2 +- .../object/api-cli/installing-minio-client.mdx | 2 +- .../object/api-cli/object-storage-aws-cli.mdx | 4 ++-- storage/object/api-cli/post-object.mdx | 2 +- .../troubleshooting/cannot-access-data.mdx | 2 +- .../object/troubleshooting/low-performance.mdx | 2 +- .../index.mdx | 2 +- tutorials/ansible-bionic-beaver/index.mdx | 4 ++-- tutorials/ansible-galaxy/index.mdx | 2 +- tutorials/arqbackup-pc-mac/index.mdx | 2 +- tutorials/automate-tasks-using-cron/index.mdx | 2 +- tutorials/back-up-postgresql-barman/index.mdx | 2 +- .../index.mdx | 4 ++-- tutorials/configure-apache-kafka/index.mdx | 2 +- tutorials/configure-graphite/index.mdx | 2 +- .../configure-nagios-monitoring/index.mdx | 2 +- .../index.mdx | 4 ++-- 
tutorials/configure-nodemcu-iot-hub/index.mdx | 2 +- tutorials/configure-smtp-relay-tem/index.mdx | 2 +- .../configure-virtual-machine-esxi/index.mdx | 4 ++-- .../create-esxi-cluster-dedibox/index.mdx | 4 ++-- .../index.mdx | 2 +- .../index.mdx | 8 ++++---- .../index.mdx | 2 +- tutorials/hadoop/index.mdx | 2 +- tutorials/hestiacp/index.mdx | 2 +- .../index.mdx | 4 ++-- tutorials/install-configure-couchdb/index.mdx | 2 +- tutorials/install-php-composer/index.mdx | 2 +- tutorials/install-postgresql/index.mdx | 4 ++-- tutorials/k8s-gitlab/index.mdx | 2 +- .../lb-firewall-haproxy-pfsense/index.mdx | 2 +- tutorials/librenms-monitoring/index.mdx | 2 +- tutorials/magento-ubuntu/index.mdx | 2 +- .../index.mdx | 2 +- tutorials/migrate-data-minio-client/index.mdx | 2 +- .../index.mdx | 2 +- .../index.mdx | 2 +- tutorials/nginx-reverse-proxy/index.mdx | 2 +- tutorials/nvidia-triton/index.mdx | 2 +- tutorials/odoo/index.mdx | 4 ++-- tutorials/proxmox-softraid/index.mdx | 4 ++-- tutorials/restic-s3-backup/index.mdx | 4 ++-- .../self-hosted-repository-gitea/index.mdx | 2 +- tutorials/setup-cockroachdb-cluster/index.mdx | 2 +- .../index.mdx | 2 +- tutorials/setup-jupyter-notebook/index.mdx | 4 ++-- .../setup-postfix-ubuntu-bionic/index.mdx | 2 +- tutorials/sinatra/index.mdx | 4 ++-- tutorials/strapi/index.mdx | 4 ++-- tutorials/terraform-quickstart/index.mdx | 4 ++-- .../index.mdx | 6 +++--- tutorials/use-cockpit-with-terraform/index.mdx | 2 +- .../veeam-backup-replication-s3/index.mdx | 8 ++++---- .../index.mdx | 2 +- tutorials/vuls-security-scanner/index.mdx | 2 +- tutorials/zammad-ticketing/index.mdx | 2 +- 120 files changed, 182 insertions(+), 182 deletions(-) diff --git a/bare-metal/dedibox/how-to/connect-to-dedibox.mdx b/bare-metal/dedibox/how-to/connect-to-dedibox.mdx index af6ccea8a0..8ac51a0c40 100644 --- a/bare-metal/dedibox/how-to/connect-to-dedibox.mdx +++ b/bare-metal/dedibox/how-to/connect-to-dedibox.mdx @@ -55,7 +55,7 @@ To connect to your server from Windows, you 
will need to use a small application To connect to your Instance from Windows, you will need to use a small application called **PuTTY**, an SSH client. -1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) +1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) 2. Launch PuTTY on your computer. The main screen of the application displays. 3. Enter your Instance's IP address in the **Hostname** field. diff --git a/bare-metal/dedibox/quickstart.mdx b/bare-metal/dedibox/quickstart.mdx index 36bb0991e2..cd0b594594 100644 --- a/bare-metal/dedibox/quickstart.mdx +++ b/bare-metal/dedibox/quickstart.mdx @@ -101,7 +101,7 @@ To connect to your server from Windows, you will need to use a small application To connect to your Instance from Windows, you will need to use a small application called **PuTTY**, an SSH client. -1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) +1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) 2. Launch PuTTY on your computer. The main screen of the application displays. 3. Enter your Instance's IP address in the **Hostname** field. diff --git a/bare-metal/elastic-metal/how-to/connect-to-server.mdx b/bare-metal/elastic-metal/how-to/connect-to-server.mdx index 1163d3d8f1..c2f817270f 100644 --- a/bare-metal/elastic-metal/how-to/connect-to-server.mdx +++ b/bare-metal/elastic-metal/how-to/connect-to-server.mdx @@ -51,7 +51,7 @@ This page shows you how to connect to your Scaleway Elastic Metal server via SSH To connect to your Elastic Metal server from Windows, you will need to use a small application called **PuTTY**, an SSH client. -1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) +1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) 2. 
Launch PuTTY on your computer. The main screen of the application displays: 3. Enter your Elastic Metal server's IP address in the **Hostname** field. diff --git a/bare-metal/elastic-metal/reference-content/elastic-metal-rv1-guidelines.mdx b/bare-metal/elastic-metal/reference-content/elastic-metal-rv1-guidelines.mdx index f26aa0a34d..f087580054 100644 --- a/bare-metal/elastic-metal/reference-content/elastic-metal-rv1-guidelines.mdx +++ b/bare-metal/elastic-metal/reference-content/elastic-metal-rv1-guidelines.mdx @@ -22,7 +22,7 @@ in the first or second partition. The partition that contains this boot file must be formatted as FAT32. This `boot.itb` file is in fact a [FIT -Image](https://docs.u-boot.org/en/latest/usage/fit/source_file_format.html/) +Image](https://docs.u-boot.org/en/latest/usage/fit/source_file_format.html) that must contain the following sections: - **kernel**: A Linux kernel image. diff --git a/changelog/july2023/2023-07-18-kubernetes-added-new-kubernetes-operator-to.mdx b/changelog/july2023/2023-07-18-kubernetes-added-new-kubernetes-operator-to.mdx index 2447534264..c5b7f0aba2 100644 --- a/changelog/july2023/2023-07-18-kubernetes-added-new-kubernetes-operator-to.mdx +++ b/changelog/july2023/2023-07-18-kubernetes-added-new-kubernetes-operator-to.mdx @@ -9,7 +9,7 @@ category: containers product: kubernetes --- -NVIDIA's [GPU operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html/) is installed by default on all new GPU pools, automatically bringing required software on your GPU worker nodes. +NVIDIA's [GPU operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) is installed by default on all new GPU pools, automatically bringing required software on your GPU worker nodes. Find out how to activate or configure the operator in the [documentation](/containers/kubernetes/how-to/use-nvidia-gpu-operator/). 
diff --git a/compute/gpu/how-to/use-mig-with-kubernetes.mdx b/compute/gpu/how-to/use-mig-with-kubernetes.mdx index 4500eadf0b..f28ed57521 100644 --- a/compute/gpu/how-to/use-mig-with-kubernetes.mdx +++ b/compute/gpu/how-to/use-mig-with-kubernetes.mdx @@ -394,4 +394,4 @@ In this guide, we will explore the capabilities of NVIDIA MIG within a Kubernete All nodes added by the autoscaler will automatically receive the label `MIG`. Note, that updates to a tag may take up to five minutes to fully propagate. -For more information about NVIDIA MIG, refer to the official [NVIDIA MIG user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/) and the [Kubernetes GPU operator documentation](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/23.6.1/gpu-operator-mig.html/). \ No newline at end of file +For more information about NVIDIA MIG, refer to the official [NVIDIA MIG user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/) and the [Kubernetes GPU operator documentation](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/23.6.1/gpu-operator-mig.html). \ No newline at end of file diff --git a/compute/gpu/how-to/use-preinstalled-env.mdx b/compute/gpu/how-to/use-preinstalled-env.mdx index 1e09bf53a2..1df6db617e 100644 --- a/compute/gpu/how-to/use-preinstalled-env.mdx +++ b/compute/gpu/how-to/use-preinstalled-env.mdx @@ -33,7 +33,7 @@ Using the latest Ubuntu Focal GPU OS11 image gives you a minimal OS installation 1. [Connect to your Instance via SSH](/compute/instances/how-to/connect-to-instance/). You are now directly within the conda `ai` preinstalled environment. -2. Use the [official conda documentation](https://docs.conda.io/projects/conda/en/latest/commands.html/) if you need any help managing your conda environment. +2. Use the [official conda documentation](https://docs.conda.io/projects/conda/en/latest/commands.html) if you need any help managing your conda environment. 
For a full, detailed list of the Python packages and versions preinstalled in this environment, look at the content of the `/root/conda-ai-env-requirements.frozen` file. diff --git a/compute/gpu/reference-content/choosing-gpu-instance-type.mdx b/compute/gpu/reference-content/choosing-gpu-instance-type.mdx index 1392c742c6..7c74dc7fb5 100644 --- a/compute/gpu/reference-content/choosing-gpu-instance-type.mdx +++ b/compute/gpu/reference-content/choosing-gpu-instance-type.mdx @@ -28,7 +28,7 @@ Below, you will find a guide to help you make an informed decision: * **Workload requirements:** Identify the nature of your workload. Are you running machine learning, deep learning, high-performance computing (HPC), data analytics, or graphics-intensive applications? Different Instance types are optimized for different types of workloads. For example, the H100 is not designed for graphics rendering. However, other models are. As [stated by Tim Dettmers](https://timdettmers.com/2023/01/30/which-gpu-for-deep-learning/), “Tensor Cores are most important, followed by the memory bandwidth of a GPU, the cache hierarchy, and only then FLOPS of a GPU.”. For more information, refer to the [NVIDIA GPU portfolio](https://www.nvidia.com/content/dam/en-zz/solutions/data-center/data-center-gpu-portfolio-line-card.pdf/). * **Performance requirements:** Evaluate the performance specifications you need, such as the number of GPUs, GPU memory, processing power, and network bandwidth. You need a lot of memory and fast storage for demanding tasks like training larger Deep Learning models. * **GPU type:** Scaleway offers different GPU types, such as various NVIDIA GPUs. Each GPU has varying levels of performance, memory, and capabilities. Choose a GPU that aligns with your specific workload requirements. -* **GPU memory:** GPU memory bandwidth is an important criterion influencing overall performance. 
Then, larger GPU memory (VRAM) is crucial for memory-intensive tasks like training larger deep learning models, especially when using larger batch sizes. Modern GPUs offer specialized data formats designed to optimize deep learning performance. These formats, including Bfloat16, [FP8](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html/), int8 and int4, enable the storage of more data in memory and can enhance performance (for example, moving from FP16 to FP8 can double the number of TFLOPS). To make an informed decision, it is thus crucial to select the appropriate architecture. Options range from Pascal and Ampere to Ada Lovelace and Hopper. Ensuring that the GPU possesses sufficient memory capacity to accommodate your specific workload is essential, preventing any potential memory-related bottlenecks. Equally important, is matching the GPU's memory type to the nature of your workload. +* **GPU memory:** GPU memory bandwidth is an important criterion influencing overall performance. Then, larger GPU memory (VRAM) is crucial for memory-intensive tasks like training larger deep learning models, especially when using larger batch sizes. Modern GPUs offer specialized data formats designed to optimize deep learning performance. These formats, including Bfloat16, [FP8](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html), int8 and int4, enable the storage of more data in memory and can enhance performance (for example, moving from FP16 to FP8 can double the number of TFLOPS). To make an informed decision, it is thus crucial to select the appropriate architecture. Options range from Pascal and Ampere to Ada Lovelace and Hopper. Ensuring that the GPU possesses sufficient memory capacity to accommodate your specific workload is essential, preventing any potential memory-related bottlenecks. Equally important, is matching the GPU's memory type to the nature of your workload. 
* **CPU and RAM:** A powerful CPU can be beneficial for tasks that involve preprocessing or post-processing. Sufficient system memory is also crucial to prevent memory-related bottlenecks or to cache your data in RAM. * **GPU driver and software compatibility:** Ensure that the GPU Instance type you choose supports the GPU drivers and software frameworks you need for your workload. This includes CUDA libraries, machine learning frameworks (TensorFlow, PyTorch, etc.), and other specific software tools. For all [Scaleway GPU OS images](/compute/gpu/reference-content/docker-images/), we offer a driver version that enables the use of all GPUs, from the oldest to the latest models. As is the NGC CLI, `nvidia-docker` is preinstalled, enabling containers to be used with CUDA, cuDNN, and the main deep learning frameworks. * **Scaling:** Consider the scalability requirements of your workload. The most efficient way to scale up your workload is by using: diff --git a/compute/gpu/reference-content/kubernetes-gpu-time-slicing.mdx b/compute/gpu/reference-content/kubernetes-gpu-time-slicing.mdx index de52aa7ba2..2d49783872 100644 --- a/compute/gpu/reference-content/kubernetes-gpu-time-slicing.mdx +++ b/compute/gpu/reference-content/kubernetes-gpu-time-slicing.mdx @@ -47,7 +47,7 @@ Kubernetes GPU time-slicing divides the GPU resources at the container level wit While time-slicing facilitates shared GPU access across a broader user spectrum, it comes with a trade-off. It sacrifices the memory and fault isolation advantages inherent to MIG. Additionally, it presents a solution to enable shared GPU access on earlier GPU generations lacking MIG support. Combining MIG and time-slicing is feasible to expand the scope of shared access to MIG instances. -For more information and examples about NVIDIA GPUs time-slicing using Kubernetes, refer to the [official documentation](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/23.6.0/gpu-sharing.html/). 
+For more information and examples about NVIDIA GPUs time-slicing using Kubernetes, refer to the [official documentation](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/23.6.0/gpu-sharing.html). Using time-slicing for GPUs with Kubernetes could bring overhead due to context-switching, potentially affecting GPU-intensive operations' performance. diff --git a/compute/gpu/reference-content/understanding-nvidia-fp8.mdx b/compute/gpu/reference-content/understanding-nvidia-fp8.mdx index 48e0687709..676637fa8e 100644 --- a/compute/gpu/reference-content/understanding-nvidia-fp8.mdx +++ b/compute/gpu/reference-content/understanding-nvidia-fp8.mdx @@ -27,4 +27,4 @@ The `E5M2` format adapts the IEEE FP16 format, allocating five bits to the expon The FP8 standard preserves accuracy comparable to 16-bit formats across a wide range of applications, architectures, and networks. -For more information about the FP8 standard, and instructions how to use it with H100 GPU Instances, refer to NVIDIA's [offical FP8 documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html/) and the [code example repository](https://github.com/NVIDIA/TransformerEngine/tree/main/examples/). \ No newline at end of file +For more information about the FP8 standard, and instructions how to use it with H100 GPU Instances, refer to NVIDIA's [offical FP8 documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html) and the [code example repository](https://github.com/NVIDIA/TransformerEngine/tree/main/examples/). 
\ No newline at end of file diff --git a/compute/gpu/reference-content/understanding-nvidia-nemo.mdx b/compute/gpu/reference-content/understanding-nvidia-nemo.mdx index 7a8431d2a2..56eb73f79d 100644 --- a/compute/gpu/reference-content/understanding-nvidia-nemo.mdx +++ b/compute/gpu/reference-content/understanding-nvidia-nemo.mdx @@ -52,4 +52,4 @@ NVIDIA NeMo can be used for various applications such as: - Building chat bots. - Developing natural language understanding models for various applications. -Developers, researchers, and companies interested in developing conversational AI models can benefit from NVIDIA NeMo to speed up the development process and create high-quality models. For more information, refer to the [official NVIDIA NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/index.html/). \ No newline at end of file +Developers, researchers, and companies interested in developing conversational AI models can benefit from NVIDIA NeMo to speed up the development process and create high-quality models. For more information, refer to the [official NVIDIA NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/index.html). \ No newline at end of file diff --git a/compute/gpu/reference-content/understanding-nvidia-ngc.mdx b/compute/gpu/reference-content/understanding-nvidia-ngc.mdx index 4fc271bd41..2bac1c5233 100644 --- a/compute/gpu/reference-content/understanding-nvidia-ngc.mdx +++ b/compute/gpu/reference-content/understanding-nvidia-ngc.mdx @@ -33,4 +33,4 @@ NGC provides a repository of pre-configured containers, models, and software sta NVIDIA closely collaborates with software developers to optimize leading AI and machine learning frameworks for peak performance on NVIDIA GPUs. This optimization significantly expedites both training and inference tasks. 
Software hosted on NGC undergoes scans against an aggregated set of common vulnerabilities and exposures (CVEs), crypto, and private keys. -For more information on NGC, refer to the official [NVIDIA NGC documentation](https://docs.nvidia.com/ngc/index.html/). +For more information on NGC, refer to the official [NVIDIA NGC documentation](https://docs.nvidia.com/ngc/index.html). diff --git a/compute/gpu/troubleshooting/install-nvidia-drivers-ubuntu.mdx b/compute/gpu/troubleshooting/install-nvidia-drivers-ubuntu.mdx index 015fccc4da..bfc9926ba0 100644 --- a/compute/gpu/troubleshooting/install-nvidia-drivers-ubuntu.mdx +++ b/compute/gpu/troubleshooting/install-nvidia-drivers-ubuntu.mdx @@ -69,6 +69,6 @@ If you encounter errors or issues during the installation process, consider the ## Additional links - [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/) -- [Frameworks Support Matrix - NVIDIA Docs](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html/) +- [Frameworks Support Matrix - NVIDIA Docs](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html) - [How to access the GPU using Docker](/compute/gpu/how-to/use-gpu-with-docker/) -- [NVIDIA Container Toolkit documentation](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html/) \ No newline at end of file +- [NVIDIA Container Toolkit documentation](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html) \ No newline at end of file diff --git a/compute/instances/api-cli/using-cloud-init.mdx b/compute/instances/api-cli/using-cloud-init.mdx index 5d49037842..5c60dcf06c 100644 --- a/compute/instances/api-cli/using-cloud-init.mdx +++ b/compute/instances/api-cli/using-cloud-init.mdx @@ -88,6 +88,6 @@ Subcommands: ```` -For detailed information on cloud-init, refer to the official cloud-init [documentation](http://cloudinit.readthedocs.io/en/latest/index.html/). 
+For detailed information on cloud-init, refer to the official cloud-init [documentation](http://cloudinit.readthedocs.io/en/latest/index.html). diff --git a/compute/instances/how-to/connect-to-instance.mdx b/compute/instances/how-to/connect-to-instance.mdx index 131f75b676..e3e4157d85 100644 --- a/compute/instances/how-to/connect-to-instance.mdx +++ b/compute/instances/how-to/connect-to-instance.mdx @@ -55,7 +55,7 @@ This page shows how to connect to your Scaleway Instance via SSH. Thanks to the To connect to your Instance from Windows, you will need to use a small application called **PuTTY**, an SSH client. - 1. [Download and install PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/). + 1. [Download and install PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html). 2. Launch PuTTY on your computer. The main screen of the application displays: 3. Enter your Instance's IP address in the **Hostname** field. diff --git a/compute/instances/quickstart.mdx b/compute/instances/quickstart.mdx index 05d9638efc..e75b511edd 100644 --- a/compute/instances/quickstart.mdx +++ b/compute/instances/quickstart.mdx @@ -75,7 +75,7 @@ You are now connected to your Instance. To connect to your Instance from Windows, you will need to use a small application called **PuTTY**, an SSH client. -1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) +1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) 2. Launch PuTTY on your computer. 3. Enter your Instance's IP address in the **Hostname** field. 4. In the side menu, under **Connection**, navigate to the **Auth** sub-category. (**Connection** > **SSH** > **Auth**). 
diff --git a/compute/instances/reference-content/preventing-outgoing-ddos.mdx b/compute/instances/reference-content/preventing-outgoing-ddos.mdx index 768f465792..db80a46b5d 100644 --- a/compute/instances/reference-content/preventing-outgoing-ddos.mdx +++ b/compute/instances/reference-content/preventing-outgoing-ddos.mdx @@ -86,14 +86,14 @@ To configure securely your DNS server, proceed as follows: - If you need recursion, limit the authorized range of IPs that can perform those requests. - [BIND](https://kb.isc.org/docs/aa-01316/) - [unbound](https://nlnetlabs.nl/documentation/unbound/unbound.conf/) - - If you use PowerDNS, you can also use [dnsdist](https://dnsdist.org/index.html/). + - If you use PowerDNS, you can also use [dnsdist](https://dnsdist.org/index.html). - Enable RateLimiting of queries and answers from your authoritative DNS - [BIND](https://kb.isc.org/docs/aa-00994/) - [unbound](https://nlnetlabs.nl/documentation/unbound/unbound.conf/) - - If you use PowerDNS, you can also use [dnsdist](https://dnsdist.org/index.html/). + - If you use PowerDNS, you can also use [dnsdist](https://dnsdist.org/index.html). 
- Set ACL on your remote control if used and limit it to localhost if possible - - [rndc for BIND](https://mirror.apps.cam.ac.uk/pub/doc/redhat/redhat7.3/rhl-rg-en-7.3/s1-bind-rndc.html/) - - [dnsdist for PowerDNS](https://dnsdist.org/index.html/) + - [rndc for BIND](https://mirror.apps.cam.ac.uk/pub/doc/redhat/redhat7.3/rhl-rg-en-7.3/s1-bind-rndc.html) + - [dnsdist for PowerDNS](https://dnsdist.org/index.html) - [unbound-control for unbound](https://nlnetlabs.nl/documentation/unbound/unbound-control/) ## Preventing HTTP(s) proxy from being used in a DDoS attack diff --git a/compute/instances/troubleshooting/fix-dns-routed-ipv6-only-debian-bullseye.mdx b/compute/instances/troubleshooting/fix-dns-routed-ipv6-only-debian-bullseye.mdx index 32c0958328..0ba6c59e09 100644 --- a/compute/instances/troubleshooting/fix-dns-routed-ipv6-only-debian-bullseye.mdx +++ b/compute/instances/troubleshooting/fix-dns-routed-ipv6-only-debian-bullseye.mdx @@ -29,7 +29,7 @@ This guide outlines the steps to enable DNS resolution on a Scaleway Instance th When a Scaleway Instance uses routed IP addresses, the IPv6 stack is automatically configured using [`SLAAC`](https://datatracker.ietf.org/doc/html/rfc4862/). With this method, the Instance is periodically advertised with various network configuration details, including the DNS server addresses it should use. The Instance is then free to consume these advertisements or not. By default, the operating system images provided by Scaleway are configured to leverage these advertisements to configure the IPv6 networking and the related DNS servers. The Debian Bullseye image is no exception. -When configuring the network at boot time, the `cloud-init` software detects the appropriate network configuration method used by the system at hand and writes and/or applies the necessary configuration files/parameters. 
On Debian Bullseye, and because of [`cloud-init`'s built-in order of detection](https://cloudinit.readthedocs.io/en/latest/reference/network-config.html#network-output-policy/), the primary detected method is [ENI](https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-eni.html/), which configures the network through Debian's well known `/etc/network/interfaces` set of files, along with the `ifupdown` toolset. +When configuring the network at boot time, the `cloud-init` software detects the appropriate network configuration method used by the system at hand and writes and/or applies the necessary configuration files/parameters. On Debian Bullseye, and because of [`cloud-init`'s built-in order of detection](https://cloudinit.readthedocs.io/en/latest/reference/network-config.html#network-output-policy/), the primary detected method is [ENI](https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-eni.html), which configures the network through Debian's well known `/etc/network/interfaces` set of files, along with the `ifupdown` toolset. This configuration method does not interact well with SLAAC's DNS advertisements. This results in an absence of DNS resolver configuration, thus breaking most of the network activities. 
diff --git a/console/billing/api-cli/retrieve-monthly-consumption.mdx b/console/billing/api-cli/retrieve-monthly-consumption.mdx index 9ed8cded3e..8c8c66d8a9 100644 --- a/console/billing/api-cli/retrieve-monthly-consumption.mdx +++ b/console/billing/api-cli/retrieve-monthly-consumption.mdx @@ -19,7 +19,7 @@ Follow the procedure below to download your monthly consumption using the Scalew - A Scaleway account and logged into the [console](https://console.scaleway.com/organization/) - Created an [API key](https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/) with sufficient [IAM permissions](https://www.scaleway.com/en/docs/identity-and-access-management/iam/reference-content/permission-sets/) to perform the actions described on this page -- [Installed `curl`](https://curl.se/download.html/) +- [Installed `curl`](https://curl.se/download.html) - Configured your environment variables. ## Exporting your environment variables diff --git a/console/billing/how-to/add-payment-method.mdx b/console/billing/how-to/add-payment-method.mdx index 39454a7a31..122ad92673 100644 --- a/console/billing/how-to/add-payment-method.mdx +++ b/console/billing/how-to/add-payment-method.mdx @@ -54,7 +54,7 @@ Before you can order Scaleway resources, you must add a payment method to your a * This method requires a successful [KYC verification](/console/account/how-to/verify-identity/). - * To add a SEPA mandate, both your postal and bank addresses must be part of the [SEPA zone](https://www.ecb.europa.eu/paym/integration/retail/sepa/html/index.en.html/). + * To add a SEPA mandate, both your postal and bank addresses must be part of the [SEPA zone](https://www.ecb.europa.eu/paym/integration/retail/sepa/html/index.en.html). 1. Access the [Scaleway console](https://console.scaleway.com/organization/). 
diff --git a/containers/kubernetes/api-cli/managing-storage.mdx b/containers/kubernetes/api-cli/managing-storage.mdx index 695ad513e4..8b7b86fb6b 100644 --- a/containers/kubernetes/api-cli/managing-storage.mdx +++ b/containers/kubernetes/api-cli/managing-storage.mdx @@ -141,7 +141,7 @@ StorageClass parameters. You will also need a passphrase to encrypt/decrypt the which is taken from the secrets passed to the `NodeStageVolume` and `NodeExpandVolume` method. The [external-provisioner](https://github.com/kubernetes-csi/external-provisioner/) -can be used to [pass down the wanted secret to the CSI plugin](https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html/) (v1.0.1+). +can be used to [pass down the wanted secret to the CSI plugin](https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html) (v1.0.1+). Some additional parameters are needed on the StorageClass: diff --git a/containers/kubernetes/how-to/use-nvidia-gpu-operator.mdx b/containers/kubernetes/how-to/use-nvidia-gpu-operator.mdx index 5d12d33ad1..8f9aaef38b 100644 --- a/containers/kubernetes/how-to/use-nvidia-gpu-operator.mdx +++ b/containers/kubernetes/how-to/use-nvidia-gpu-operator.mdx @@ -17,7 +17,7 @@ categories: Kubernetes Kapsule and Kosmos support NVIDIA's official Kubernetes operator for all GPU pools. This operator is compatible with [RENDER-S](https://www.scaleway.com/en/gpu-instances/), [GPU-3070-S](https://www.scaleway.com/en/gpu-3070-instances/), [H100 PCIe](https://www.scaleway.com/en/h100-pcie-try-it-now/), [L40s](https://www.scaleway.com/en/l40s-gpu-instance/) and [L4](https://www.scaleway.com/en/l4-gpu-instance/) offers. -The GPU operator is set up for all GPU pools created in Kubernetes Kapsule and Kosmos, providing automated installation of all required software on GPU worker nodes, such as the device plugin, container toolkit, GPU drivers etc. 
For more information, refer to [the GPU operator overview](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html/). +The GPU operator is set up for all GPU pools created in Kubernetes Kapsule and Kosmos, providing automated installation of all required software on GPU worker nodes, such as the device plugin, container toolkit, GPU drivers etc. For more information, refer to [the GPU operator overview](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html). diff --git a/dedibox-console/classic-hosting/how-to/configure-htaccess-file.mdx b/dedibox-console/classic-hosting/how-to/configure-htaccess-file.mdx index 8b87b6dc21..32a0509292 100644 --- a/dedibox-console/classic-hosting/how-to/configure-htaccess-file.mdx +++ b/dedibox-console/classic-hosting/how-to/configure-htaccess-file.mdx @@ -104,7 +104,7 @@ This function allows you to replace the message of a **404 error (Not found)** f ``` 2. Upload the file using FTP in the folder of the concerned subdomain (folder `www` for `www.URLexample`, `blog` for `blog.URLexample` etc.) and name it `.htaccess`. - - Take a look at the documentation on the [Apache website](http://httpd.apache.org/docs/2.0/mod/mod_alias.html/) for more information. + - Take a look at the documentation on the [Apache website](http://httpd.apache.org/docs/2.0/mod/mod_alias.html) for more information. 
## How to configure an HTML redirection diff --git a/dedibox-console/cpanel-hosting/reference-content.mdx b/dedibox-console/cpanel-hosting/reference-content.mdx index 1e9ded7e80..5f37f2fdbf 100644 --- a/dedibox-console/cpanel-hosting/reference-content.mdx +++ b/dedibox-console/cpanel-hosting/reference-content.mdx @@ -42,15 +42,15 @@ If you need additional help setting up your webhosting solution, or have any que ## JetBackup -* [Full Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fullBackups.html/) -* [File Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fileBackups.html/) -* [Cron Job Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/cronJobBackups.html/) -* [DNS Zone Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dnsBackups.html/) -* [Database Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dbBackups.html/) -* [Email Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/emailBackups.html/) -* [Queue](https://docs.jetbackup.com/manual/cpanel/queue.html/) -* [Snapshots](https://docs.jetbackup.com/manual/cpanel/snapshots.html/) -* [Settings](https://docs.jetbackup.com/manual/cpanel/settings.html/) +* [Full Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fullBackups.html) +* [File Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fileBackups.html) +* [Cron Job Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/cronJobBackups.html) +* [DNS Zone Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dnsBackups.html) +* [Database Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dbBackups.html) +* [Email Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/emailBackups.html) +* [Queue](https://docs.jetbackup.com/manual/cpanel/queue.html) +* [Snapshots](https://docs.jetbackup.com/manual/cpanel/snapshots.html) +* [Settings](https://docs.jetbackup.com/manual/cpanel/settings.html) ## Databases diff --git 
a/dedibox-network/network/how-to/configure-network-netplan.mdx b/dedibox-network/network/how-to/configure-network-netplan.mdx index d22c11e1f8..3f7a78bcda 100644 --- a/dedibox-network/network/how-to/configure-network-netplan.mdx +++ b/dedibox-network/network/how-to/configure-network-netplan.mdx @@ -13,7 +13,7 @@ categories: - dedibox-network --- -Since the release of its version 18.04, Bionic Beaver, [Ubuntu](http://www.ubuntu.org/) has switched to [Netplan](http://netplan.io/) for the network interface configuration. It is a [YAML](https://yaml.org/spec/1.2/spec.html/) based configuration system, which simplifies the configuration process. +Since the release of its version 18.04, Bionic Beaver, [Ubuntu](https://ubuntu.com/) has switched to [Netplan](http://netplan.io/) for the network interface configuration. It is a [YAML](https://yaml.org/spec/1.2/spec.html) based configuration system, which simplifies the configuration process. diff --git a/dedibox-network/rpn/how-to/configure-jumboframes.mdx b/dedibox-network/rpn/how-to/configure-jumboframes.mdx index 0edbe7704b..339a37eac7 100644 --- a/dedibox-network/rpn/how-to/configure-jumboframes.mdx +++ b/dedibox-network/rpn/how-to/configure-jumboframes.mdx @@ -89,7 +89,7 @@ This technology allows you to significantly minimize the interruptions and proce ### On Windows -To change the MTU settings on Windows, you can use a tool like [TCP Optimizer](http://www.clubic.com/telecharger-fiche305576-tcp-optimizer.html/) to modify the MTU settings. +To change the MTU settings on Windows, you can use a tool like [TCP Optimizer](http://www.clubic.com/telecharger-fiche305576-tcp-optimizer.html) to modify the MTU settings. 1. Download and start the tool. 2. In the bottom right corner, click **Custom**.
diff --git a/dedibox/dedicated-servers/how-to/connect-to-server.mdx b/dedibox/dedicated-servers/how-to/connect-to-server.mdx index 9bccfd51e2..869d013ef3 100644 --- a/dedibox/dedicated-servers/how-to/connect-to-server.mdx +++ b/dedibox/dedicated-servers/how-to/connect-to-server.mdx @@ -51,7 +51,7 @@ You can also use FTP, SFTP, or SCP to transfer data to and from your server. To connect to your server from Windows, you will need to use a small application called **PuTTY**, an SSH client. -1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) +1. Download and install PuTTY [here](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) 2. Launch PuTTY on your computer. The main screen of the application displays: 3. Enter your server's IP address in the **Hostname** field. diff --git a/dedibox/dedicated-servers/quickstart.mdx b/dedibox/dedicated-servers/quickstart.mdx index fa5df7abdb..79373a15eb 100644 --- a/dedibox/dedicated-servers/quickstart.mdx +++ b/dedibox/dedicated-servers/quickstart.mdx @@ -124,7 +124,7 @@ To connect to your server from Windows, you will need to use a small application To connect to your Instance from Windows, you will need to use a small application called **PuTTY**, an SSH client. -1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/). +1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html). 2. Launch PuTTY on your computer. The main screen of the application displays. 3. Enter your Instance's IP address in the **Hostname** field. 
diff --git a/dedibox/kvm-over-ip/how-to/dell-idrac6.mdx b/dedibox/kvm-over-ip/how-to/dell-idrac6.mdx index ca1ad8e196..fa5148b8c2 100644 --- a/dedibox/kvm-over-ip/how-to/dell-idrac6.mdx +++ b/dedibox/kvm-over-ip/how-to/dell-idrac6.mdx @@ -18,7 +18,7 @@ This page shows you how to use [KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- Installed [Java](https://www.java.com/en/download/help/download_options.html/) on your local computer +- Installed [Java](https://www.java.com/en/download/help/download_options.html) on your local computer - A Dedibox server with a [Dell iDRAC 6](https://www.dell.com/support/kbdoc/en-us/000123577/set-up-and-manage-idrac-6-lifecycle-controller-for-dell-poweredge-11g-servers/) KVM-over-IP device ## Creating an iDRAC 6 session diff --git a/dedibox/kvm-over-ip/how-to/dell-idrac7.mdx b/dedibox/kvm-over-ip/how-to/dell-idrac7.mdx index 1de44fe163..c0246c3b84 100644 --- a/dedibox/kvm-over-ip/how-to/dell-idrac7.mdx +++ b/dedibox/kvm-over-ip/how-to/dell-idrac7.mdx @@ -18,7 +18,7 @@ This page shows you how to use [KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- Installed [Java](https://www.java.com/en/download/help/download_options.html/) on your local computer +- Installed [Java](https://www.java.com/en/download/help/download_options.html) on your local computer - A Dedibox server with a [Dell iDRAC 7](https://www.dell.com/support/manuals/en-us/integrated-dell-remote-access-cntrllr-7-v1.50.50/idrac7ug1.50.50-v1/overview/) KVM-over-IP device # Creating an iDRAC 7 Session diff --git a/dedibox/kvm-over-ip/how-to/dell-idrac8.mdx b/dedibox/kvm-over-ip/how-to/dell-idrac8.mdx index 13c49e8925..e361848978 100644 --- a/dedibox/kvm-over-ip/how-to/dell-idrac8.mdx +++ b/dedibox/kvm-over-ip/how-to/dell-idrac8.mdx @@ -18,7 +18,7 @@ This page shows you how to use 
[KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- Installed [Java](https://www.java.com/en/download/help/download_options.html/) on your local computer +- Installed [Java](https://www.java.com/en/download/help/download_options.html) on your local computer - A Dedibox server with a [DELL iDRAC 8](https://www.dell.com/support/manuals/en-us/idrac8-with-lc-v2.05.05.05/idrac8_2.05.05.05_ug/overview/) KVM-over-IP device ## Creating an iDRAC 8 session diff --git a/dedibox/kvm-over-ip/how-to/dell-idrac9.mdx b/dedibox/kvm-over-ip/how-to/dell-idrac9.mdx index d14305ff39..80a08ad432 100644 --- a/dedibox/kvm-over-ip/how-to/dell-idrac9.mdx +++ b/dedibox/kvm-over-ip/how-to/dell-idrac9.mdx @@ -18,7 +18,7 @@ This page shows you how to use [KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- Installed [Java](https://www.java.com/en/download/help/download_options.html/) on your local computer +- Installed [Java](https://www.java.com/en/download/help/download_options.html) on your local computer - A Dedibox server with a [DELL iDRAC 9](https://www.dell.com/support/manuals/en-us/idrac8-with-lc-v2.05.05.05/idrac8_2.05.05.05_ug/overview/) KVM-over-IP device ## Creating an iDRAC 9 session diff --git a/dedibox/kvm-over-ip/how-to/hp-ilo.mdx b/dedibox/kvm-over-ip/how-to/hp-ilo.mdx index 859604e7a2..d36f96b0c7 100644 --- a/dedibox/kvm-over-ip/how-to/hp-ilo.mdx +++ b/dedibox/kvm-over-ip/how-to/hp-ilo.mdx @@ -18,7 +18,7 @@ This page shows you how to use [KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- A Dedibox server with a KVM-over-IP device: [HP iLO (integrated lights-out)](https://www.hpe.com/us/en/servers/integrated-lights-out-ilo.html/) +- A Dedibox server with a KVM-over-IP device: [HP iLO (integrated 
lights-out)](https://www.hpe.com/us/en/servers/integrated-lights-out-ilo.html) ## Creating an iLO session @@ -39,7 +39,7 @@ The connection URL and your credentials display. Click on the link to access the 1. Open the connection URL of your iLO in your web browser, then log in using the credentials displayed during access creation. 2. Click **Integrated Remote Console**. - Depending on the model of your dedicated server, iLO may propose either an **HTML5-based** KVM-over-IP interface or a **Java-based** one. Make sure to have [Java installed](https://www.java.com/en/download/help/download_options.html/) on your local computer if you want to use this version. + Depending on the model of your dedicated server, iLO may propose either an **HTML5-based** KVM-over-IP interface or a **Java-based** one. Make sure to have [Java installed](https://www.java.com/en/download/help/download_options.html) on your local computer if you want to use this version. 3. Click the **Disc icon** > **CD/DVD** > **Local *.iso file** and select the local installation image of your operating system. 
diff --git a/dedibox/kvm-over-ip/how-to/quanta-computer.mdx b/dedibox/kvm-over-ip/how-to/quanta-computer.mdx index 3dde94fa96..0a17484f54 100644 --- a/dedibox/kvm-over-ip/how-to/quanta-computer.mdx +++ b/dedibox/kvm-over-ip/how-to/quanta-computer.mdx @@ -18,7 +18,7 @@ This page shows you how to use [KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- Installed [Java](https://www.java.com/en/download/help/download_options.html/) on your local computer +- Installed [Java](https://www.java.com/en/download/help/download_options.html) on your local computer - A Dedibox server with a [Quanta Computer](https://www.qct.io/) KVM-over-IP device ## Creating a KVM session diff --git a/dedibox/kvm-over-ip/how-to/supermicro.mdx b/dedibox/kvm-over-ip/how-to/supermicro.mdx index c1fdfb561d..9669dc89bf 100644 --- a/dedibox/kvm-over-ip/how-to/supermicro.mdx +++ b/dedibox/kvm-over-ip/how-to/supermicro.mdx @@ -18,7 +18,7 @@ This page shows you how to use [KVM](/dedibox/kvm-over-ip/concepts/#kvm-over-ip) - A Dedibox account logged into the [console](https://console.online.net/) -- Installed [Java](https://www.java.com/en/download/help/download_options.html/) on your local computer +- Installed [Java](https://www.java.com/en/download/help/download_options.html) on your local computer - A Dedibox server with a [Supermicro](https://www.supermicro.com/en/solutions/management-software/bmc-resources/) KVM-over-IP device ## Creating a KVM Session diff --git a/dedibox/vps/how-to/connect-vps.mdx b/dedibox/vps/how-to/connect-vps.mdx index 54d6fe790e..d051dfcdf2 100644 --- a/dedibox/vps/how-to/connect-vps.mdx +++ b/dedibox/vps/how-to/connect-vps.mdx @@ -38,7 +38,7 @@ To connect to your Dedibox VPS from Linux or Mac OSX, follow these steps: ## Connecting from Windows To connect to your Dedibox VPS from Windows, follow these steps: -1. 
Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/). +1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html). You also have the option to use the [Windows SSH client](https://learn.microsoft.com/en-us/windows/terminal/tutorials/ssh/), which is available on Windows 10 or 11 operating systems. diff --git a/dedibox/vps/quickstart.mdx b/dedibox/vps/quickstart.mdx index a226a8b2da..b01a9babcf 100644 --- a/dedibox/vps/quickstart.mdx +++ b/dedibox/vps/quickstart.mdx @@ -56,7 +56,7 @@ You can manage your Dedibox VPS by accessing it through SSH. ### Windows connection -1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/). +1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html). 2. Launch PuTTY and enter your Dedibox VPS's IP address. 3. Click "Open" to establish the connection. 4. Enter the username `root` and your password to log in. diff --git a/faq/instances.mdx b/faq/instances.mdx index e47630aecb..b3b7f90f83 100644 --- a/faq/instances.mdx +++ b/faq/instances.mdx @@ -263,7 +263,7 @@ Both ENT1 and POP2 Instance types share the following features: - Same pricing structure - Accelerated booting process -POP2 Instances provide CPU- and memory-optimized variants tailored to suit your workload requirements more effectively. The primary distinction between ENT1 and POP2 lies in [AMD Secure Encrypted Virtualization (SEV)](https://www.amd.com/fr/developer/sev.html/), which is disabled for POP2 Instances. +POP2 Instances provide CPU- and memory-optimized variants tailored to suit your workload requirements more effectively. The primary distinction between ENT1 and POP2 lies in [AMD Secure Encrypted Virtualization (SEV)](https://www.amd.com/en/developer/sev.html), which is disabled for POP2 Instances.
By choosing POP2 Instances, you gain access to the latest features, such as the potential for live migration of Instances in the future, ensuring that your infrastructure remains aligned with evolving demands and technological advancements. We recommend choosing POP2 Instances for most general workloads unless your specific workload requires features unique to ENT1 Instances. diff --git a/identity-and-access-management/organizations-and-projects/how-to/create-ssh-key.mdx b/identity-and-access-management/organizations-and-projects/how-to/create-ssh-key.mdx index 6d690032b4..f6d5f14cf9 100644 --- a/identity-and-access-management/organizations-and-projects/how-to/create-ssh-key.mdx +++ b/identity-and-access-management/organizations-and-projects/how-to/create-ssh-key.mdx @@ -76,9 +76,9 @@ On macOS and Linux, you can generate the SSH key pair directly from the terminal ### How to generate an Ed25519 SSH key pair on Windows -On Windows, you can use the third-party application [PuTTYgen](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) to generate an SSH key pair. +On Windows, you can use the third-party application [PuTTYgen](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) to generate an SSH key pair. -1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) on your local computer. The **PuTTYgen** application is automatically installed along with the main PuTTY application. +1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) on your local computer. The **PuTTYgen** application is automatically installed along with the main PuTTY application. 2. Launch PuTTYgen by double-clicking the application icon. 3. Select **EdDSA** and click the **Generate** button. You can also add a passphrase before generating the key to increase security. 
diff --git a/identity-and-access-management/organizations-and-projects/quickstart.mdx b/identity-and-access-management/organizations-and-projects/quickstart.mdx index 16ec0b4a4d..1989861061 100644 --- a/identity-and-access-management/organizations-and-projects/quickstart.mdx +++ b/identity-and-access-management/organizations-and-projects/quickstart.mdx @@ -63,9 +63,9 @@ On OSX and Linux, you can generate the SSH key pair directly from the terminal ( ### How to generate an SSH key pair on Windows -On Windows, you can use the third-party application [PuTTYgen](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) to generate an SSH key pair. +On Windows, you can use the third-party application [PuTTYgen](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) to generate an SSH key pair. -1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) to your local computer. The **PuTTYgen** application is automatically installed (as well as the main PuTTY application). +1. Download and install [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) to your local computer. The **PuTTYgen** application is automatically installed (as well as the main PuTTY application). 2. Launch PuTTYgen by double-clicking on the application icon. The following screen displays. 3. Select **RSA**, set the number of bits in the key to **4096** and click the **Generate** button:2. Launch PuTTYgen by double-clicking on the application icon. The following screen displays. diff --git a/managed-services/iot-hub/how-to/understand-event-messages.mdx b/managed-services/iot-hub/how-to/understand-event-messages.mdx index 5fb1010e9f..6e354fce1c 100644 --- a/managed-services/iot-hub/how-to/understand-event-messages.mdx +++ b/managed-services/iot-hub/how-to/understand-event-messages.mdx @@ -69,12 +69,12 @@ This section shows you the types of message that can be received in IoT Hub Even - `"failed to connect to database. 
Error ERRNO: ERRMSG"`: The route could not connect to your database. - `ERRNO` and `ERRMSG` are respectively PostgreSQL standard error codes and messages. See [PostgreSQL documentation](https://www.postgresql.org/docs/11/errcodes-appendix.html/) + `ERRNO` and `ERRMSG` are respectively PostgreSQL standard error codes and messages. See [PostgreSQL documentation](https://www.postgresql.org/docs/11/errcodes-appendix.html) - `"failed to prepare 'QUERY' query. Error ERRNO: ERRMSG"`: The query preparation failed (rejected by PostgreSQL). - `ERRNO` and `ERRMSG` are respectively PostgreSQL standard error codes and messages. See [PostgreSQL documentation](https://www.postgresql.org/docs/11/errcodes-appendix.html/) + `ERRNO` and `ERRMSG` are respectively PostgreSQL standard error codes and messages. See [PostgreSQL documentation](https://www.postgresql.org/docs/11/errcodes-appendix.html) - `"failed to execute query. Error ERRNO: ERRMSG"`: Query execution failed. You will find the payload associated with this query in the field named `payload`. - `ERRNO` and `ERRMSG` are respectively PostgreSQL standard error codes and messages. See [PostgreSQL documentation](https://www.postgresql.org/docs/11/errcodes-appendix.html/) + `ERRNO` and `ERRMSG` are respectively PostgreSQL standard error codes and messages. 
See [PostgreSQL documentation](https://www.postgresql.org/docs/11/errcodes-appendix.html) diff --git a/managed-services/transactional-email/api-cli/send-emails-with-api.mdx b/managed-services/transactional-email/api-cli/send-emails-with-api.mdx index f46f685f22..5513b08b7a 100644 --- a/managed-services/transactional-email/api-cli/send-emails-with-api.mdx +++ b/managed-services/transactional-email/api-cli/send-emails-with-api.mdx @@ -20,7 +20,7 @@ This page shows you how to send a simple transactional email in `JSON` format to - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Configured your API key](/identity-and-access-management/iam/how-to/create-api-keys/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- [Installed curl](https://curl.se/download.html/) +- [Installed curl](https://curl.se/download.html) - [Configured](/managed-services/transactional-email/how-to/configure-domain-with-transactional-email/) your domain name with Transactional Email - [Added SPF, DKIM](/managed-services/transactional-email/how-to/add-spf-dkim-records-to-your-domain/), [MX](/managed-services/transactional-email/how-to/add-mx-record-to-your-domain/) and [DMARC](/managed-services/transactional-email/how-to/add-mx-record-to-your-domain/) records to your domain diff --git a/managed-services/webhosting/reference-content/cpanel-reference-content.mdx b/managed-services/webhosting/reference-content/cpanel-reference-content.mdx index 3600c33078..82ddcbf485 100644 --- a/managed-services/webhosting/reference-content/cpanel-reference-content.mdx +++ b/managed-services/webhosting/reference-content/cpanel-reference-content.mdx @@ -42,15 +42,15 @@ If you need additional help setting up your Web Hosting plan, or have any questi ## JetBackup -* [Full 
Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fullBackups.html/) -* [File Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fileBackups.html/) -* [Cron Job Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/cronJobBackups.html/) -* [DNS Zone Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dnsBackups.html/) -* [Database Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dbBackups.html/) -* [Email Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/emailBackups.html/) -* [Queue](https://docs.jetbackup.com/manual/cpanel/queue.html/) -* [Snapshots](https://docs.jetbackup.com/manual/cpanel/snapshots.html/) -* [Settings](https://docs.jetbackup.com/manual/cpanel/settings.html/) +* [Full Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fullBackups.html) +* [File Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/fileBackups.html) +* [Cron Job Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/cronJobBackups.html) +* [DNS Zone Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dnsBackups.html) +* [Database Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/dbBackups.html) +* [Email Backups](https://docs.jetbackup.com/manual/cpanel/BackupTypes/emailBackups.html) +* [Queue](https://docs.jetbackup.com/manual/cpanel/queue.html) +* [Snapshots](https://docs.jetbackup.com/manual/cpanel/snapshots.html) +* [Settings](https://docs.jetbackup.com/manual/cpanel/settings.html) ## Databases diff --git a/observability/cockpit/concepts.mdx b/observability/cockpit/concepts.mdx index a6542bebb2..897a9d45fd 100644 --- a/observability/cockpit/concepts.mdx +++ b/observability/cockpit/concepts.mdx @@ -114,7 +114,7 @@ LogQL is [Grafana Loki’s language](https://grafana.com/docs/loki/latest/logql/ Logs are a data type that provides a record of all events and errors taking place during the lifecycle of your resources. 
They represent an excellent source of visibility if you want to know when a problem occurred, or which events correlate with it. -You can push logs with any Loki-compatible agent such as [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/), [Fluentd](https://docs.fluentd.org/), [Fluent Bit](https://docs.fluentbit.io/manual/) or [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html/). +You can push logs with any Loki-compatible agent such as [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/), [Fluentd](https://docs.fluentd.org/), [Fluent Bit](https://docs.fluentbit.io/manual/) or [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html). ## Managed alerts diff --git a/observability/cockpit/how-to/send-metrics-logs-to-cockpit.mdx b/observability/cockpit/how-to/send-metrics-logs-to-cockpit.mdx index 934db47365..5b18da4a7e 100644 --- a/observability/cockpit/how-to/send-metrics-logs-to-cockpit.mdx +++ b/observability/cockpit/how-to/send-metrics-logs-to-cockpit.mdx @@ -14,7 +14,7 @@ dates: --- This page shows you how to send [metrics](/observability/cockpit/concepts/#metric) and [logs](/observability/cockpit/concepts/#logs) to your [Cockpit](/observability/cockpit/concepts/#cockpit). You can push metrics with any `Prometheus Remote Write` compatible agent such as the [Prometheus](https://prometheus.io/docs/introduction/overview/), [Grafana](https://grafana.com/docs/agent/latest/) or [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) agents. -You can push logs with any Loki compatible agent such as the [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/), [Fluentd](https://docs.fluentd.org/), [Fluent Bit](https://docs.fluentbit.io/manual/) or [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html/) agents. 
+You can push logs with any Loki compatible agent such as the [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/), [Fluentd](https://docs.fluentd.org/), [Fluent Bit](https://docs.fluentbit.io/manual/) or [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html) agents. diff --git a/observability/cockpit/troubleshooting/resetting-grafana-password-via-the-api.mdx b/observability/cockpit/troubleshooting/resetting-grafana-password-via-the-api.mdx index 8385d6453a..d96c008b6d 100644 --- a/observability/cockpit/troubleshooting/resetting-grafana-password-via-the-api.mdx +++ b/observability/cockpit/troubleshooting/resetting-grafana-password-via-the-api.mdx @@ -18,7 +18,7 @@ This page shows you how to reset your Grafana user's password through the Scalew - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Created](/identity-and-access-management/iam/how-to/create-api-keys/) an API key with sufficient [IAM permissions](/identity-and-access-management/iam/reference-content/permission-sets/) to perform the actions described on this page - - [Installed](https://curl.se/download.html/) curl + - [Installed](https://curl.se/download.html) curl Open a terminal and paste the following command to retrieve your Grafana user ID: diff --git a/serverless/containers/api-cli/deploy-container-api.mdx b/serverless/containers/api-cli/deploy-container-api.mdx index 5b51391170..c2681da8a2 100644 --- a/serverless/containers/api-cli/deploy-container-api.mdx +++ b/serverless/containers/api-cli/deploy-container-api.mdx @@ -22,7 +22,7 @@ Refer to the [Scaleway Developers website](https://www.scaleway.com/en/developer - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization - A valid [API 
key](/identity-and-access-management/iam/how-to/create-api-keys/) -- Installed [curl](https://curl.se/download.html/) +- Installed [curl](https://curl.se/download.html) - Made your first [request](https://www.scaleway.com/en/developers/api/#quickstart:-first-request/) using the Scaleway API 1. Run the following command in your terminal to create a containers namespace: diff --git a/serverless/containers/reference-content/configure-trigger-inputs.mdx b/serverless/containers/reference-content/configure-trigger-inputs.mdx index 3882f3f727..ec273afe63 100644 --- a/serverless/containers/reference-content/configure-trigger-inputs.mdx +++ b/serverless/containers/reference-content/configure-trigger-inputs.mdx @@ -24,7 +24,7 @@ Triggers get events from an input, such as an SQS queue, and forward them to a c Triggers only keep a buffer of the messages **that are in-flight**, they do not drain all the messages of the input in advance. -As a result, in some scenarios such as event bursts or slow computations, events may stay in the input buffer for a while before being consumed. If the input messages in the queue are set with a timeout, such as the [MessageRetentionPeriod](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html/) in SQS queues, events may be deleted before triggering the container. +As a result, in some scenarios such as event bursts or slow computations, events may stay in the input buffer for a while before being consumed. If the input messages in the queue are set with a timeout, such as the [MessageRetentionPeriod](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html) in SQS queues, events may be deleted before triggering the container. The implementation of the core trigger behavior is **input-agnostic**. It is therefore your responsibility to configure the input buffers according to your use case to avoid losing events. 
diff --git a/serverless/functions/api-cli/deploy-function-api.mdx b/serverless/functions/api-cli/deploy-function-api.mdx index 2c0e026a49..6039d1d6db 100644 --- a/serverless/functions/api-cli/deploy-function-api.mdx +++ b/serverless/functions/api-cli/deploy-function-api.mdx @@ -23,7 +23,7 @@ Refer to the [Scaleway Developers website](https://www.scaleway.com/en/developer - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization - A valid [API key](/identity-and-access-management/iam/how-to/create-api-keys/) -- Installed [curl](https://curl.se/download.html/) +- Installed [curl](https://curl.se/download.html) - [Made your first request](https://www.scaleway.com/en/developers/api/#quickstart:-first-request/) using the Scaleway API 1. Run the following command in your terminal to create a functions namespace: diff --git a/serverless/functions/reference-content/code-examples.mdx b/serverless/functions/reference-content/code-examples.mdx index ce71d36331..02c993335f 100644 --- a/serverless/functions/reference-content/code-examples.mdx +++ b/serverless/functions/reference-content/code-examples.mdx @@ -509,7 +509,7 @@ exports.handle = async function (event, context) { ### Using ES Modules -Node has two module systems: `CommonJS` modules and `ECMAScript` (`ES`) modules. By default, Node treats your code files as CommonJS modules, however [ES modules](https://nodejs.org/api/esm.html/) are also available on Scaleway Serverless Functions. ES modules give you a more modern way to reuse your code. +Node has two module systems: `CommonJS` modules and `ECMAScript` (`ES`) modules. By default, Node treats your code files as CommonJS modules, however [ES modules](https://nodejs.org/api/esm.html) are also available on Scaleway Serverless Functions. 
ES modules give you a more modern way to reuse your code. According to the official documentation, you can specify the module type in `package.json` to use ES modules, as in the following example: @@ -536,7 +536,7 @@ function handle(event, context, cb) { The use of ES modules is encouraged, since they are more efficient and make setup and debugging much easier. -Using `"type": "module"` or `"type": "commonjs"` in your package.json file will enable/disable some features in Node runtime. For a comprehensive list of differences, refer to the [official documentation](https://nodejs.org/api/esm.html/), the following is a summary only: +Using `"type": "module"` or `"type": "commonjs"` in your package.json file will enable/disable some features in Node runtime. For a comprehensive list of differences, refer to the [official documentation](https://nodejs.org/api/esm.html), the following is a summary only: - `commonjs` is used as default value - `commonjs` allows you to use `require/module.exports` (synchronous code loading that copies all file contents) - `module` allows you to use `import/export` ES6 instructions (optimized asynchronous loading that imports only the pieces of code you need) diff --git a/serverless/functions/reference-content/configure-trigger-inputs.mdx b/serverless/functions/reference-content/configure-trigger-inputs.mdx index 2d8a13f59d..684834a7ce 100644 --- a/serverless/functions/reference-content/configure-trigger-inputs.mdx +++ b/serverless/functions/reference-content/configure-trigger-inputs.mdx @@ -24,7 +24,7 @@ Triggers get events from an input, such as an [SQS queue](/serverless/messaging/ Triggers only keep a buffer of the messages that are in-flight, they do not drain all the messages of the input in advance. -As a result, in some scenarios such as event bursts or slow computations, events may stay in the input buffer for a while before being consumed. 
If the input messages in the queue are set with a timeout, such as the [MessageRetentionPeriod](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html/) in SQS queues, events may be deleted before triggering the function. +As a result, in some scenarios such as event bursts or slow computations, events may stay in the input buffer for a while before being consumed. If the input messages in the queue are set with a timeout, such as the [MessageRetentionPeriod](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html) in SQS queues, events may be deleted before triggering the function. The implementation of the core trigger behavior is input agnostic, it is, therefore, your responsibility to configure the input buffers according to your use case to avoid losing events. diff --git a/serverless/messaging/api-cli/connect-aws-cli.mdx b/serverless/messaging/api-cli/connect-aws-cli.mdx index 7a8cb17ca3..173ca01d85 100644 --- a/serverless/messaging/api-cli/connect-aws-cli.mdx +++ b/serverless/messaging/api-cli/connect-aws-cli.mdx @@ -25,7 +25,7 @@ This guide shows you how to install the AWS-CLI and configure it to connect to S - Valid [credentials](/serverless/messaging/how-to/create-credentials/) for SQS/SNS - This page assumes you will use the AWS-CLI v1. For v2, see the [relevant AWS-CLI documentation page](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html/). + This page assumes you will use the AWS-CLI v1. For v2, see the [relevant AWS-CLI documentation page](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). 
## How to install the AWS-CLI diff --git a/serverless/messaging/api-cli/python-node-sns.mdx b/serverless/messaging/api-cli/python-node-sns.mdx index cb0719aebb..3a13b3887b 100644 --- a/serverless/messaging/api-cli/python-node-sns.mdx +++ b/serverless/messaging/api-cli/python-node-sns.mdx @@ -134,7 +134,7 @@ The HTTP server should receive an HTTP request with a body in json matching the } ``` -The signing certificate of the message is in the JSON of the `SigningCertURL`. This certificate is also signed by the [trust chain certificate](https://messaging.s3.fr-par.scw.cloud/fr-par/sns/sns-trust-chain.pem/) (common name `sns.mnq.srr.scw.cloud`). For more information about verifying the authenticity of the message, refer to the official [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/sns-verify-signature-of-message.html/). +The signing certificate of the message is in the JSON of the `SigningCertURL`. This certificate is also signed by the [trust chain certificate](https://messaging.s3.fr-par.scw.cloud/fr-par/sns/sns-trust-chain.pem/) (common name `sns.mnq.srr.scw.cloud`). For more information about verifying the authenticity of the message, refer to the official [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/sns-verify-signature-of-message.html). To confirm the subscription, make a request to the `SubscribeURL` using your browser or curl. @@ -169,7 +169,7 @@ sns = boto3.resource('sns', The `endpoint_url` for Scaleway Messaging and Queuing with SNS is `https://sns.mnq.fr-par.scaleway.com`. The values for the access and secret keys should be the credentials you [generated](/serverless/messaging/how-to/create-credentials/) for SNS. -Once connected to the SNS service, you can use any of the SDK's available functions. However, some functions are not [supported by Scaleway Messaging and Queuing](/serverless/messaging/reference-content/sns-support/), so do check the link to make sure. 
See the [official SDK documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html/) for more information, or keep reading for some code examples. +Once connected to the SNS service, you can use any of the SDK's available functions. However, some functions are not [supported by Scaleway Messaging and Queuing](/serverless/messaging/reference-content/sns-support/), so do check the link to make sure. See the [official SDK documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) for more information, or keep reading for some code examples. ### Create topic (Python) @@ -296,7 +296,7 @@ Once connected to the SNS service, you can use any of the SDK's available functi ### Create topic (NodeJS) -You can find all available parameters for `createTopic` in the [AWS documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-sns/classes/createtopiccommand.html/). +You can find all available parameters for `createTopic` in the [AWS documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-sns/classes/createtopiccommand.html). ```javascript var paramsTopic = { @@ -312,7 +312,7 @@ console.log(topicARN); Be careful: messages sent to topics with no subscriptions are automatically deleted. -This code sample demonstrates how to send a message with `MessageAttributes`. For more information on MessageAttributes, refer to [the official documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-sns/classes/publishcommand.html/). +This code sample demonstrates how to send a message with `MessageAttributes`. For more information on MessageAttributes, refer to [the official documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-sns/classes/publishcommand.html). 
```javascript var paramsSend = { @@ -327,7 +327,7 @@ console.log(restPublishCommand.MessageId); ### Subscribe to a topic (NodeJS) -You can find all available parameters for the subscribe operation in the [AWS documentation] (https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-sns/classes/subscribecommand.html/) +You can find all available parameters for the subscribe operation in the [AWS documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-sns/classes/subscribecommand.html) #### Subscribe to an SQS queue from the same Scaleway Project diff --git a/serverless/messaging/api-cli/python-node-sqs.mdx b/serverless/messaging/api-cli/python-node-sqs.mdx index b455cc7c0a..69d767e3f5 100644 --- a/serverless/messaging/api-cli/python-node-sqs.mdx +++ b/serverless/messaging/api-cli/python-node-sqs.mdx @@ -118,7 +118,7 @@ sqs = boto3.resource('sqs', The `endpoint_url` for Scaleway Messaging and Queuing with SQS is `https://sqs.mnq.fr-par.scaleway.com`. For the access and secret key values, use the credentials you [generated](/serverless/messaging/how-to/create-credentials/) for your SQS. -Once connected to the SQS service, you can use any functions available with the SDK - just check that they're [supported by Scaleway Messaging and Queuing](/serverless/messaging/reference-content/sqs-support/). See the [official documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html/) for more details, or read on to see some examples. +Once connected to the SQS service, you can use any functions available with the SDK - just check that they're [supported by Scaleway Messaging and Queuing](/serverless/messaging/reference-content/sqs-support/). See the [official documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) for more details, or read on to see some examples. 
### Create queue (Python) @@ -175,7 +175,7 @@ var sqsClient = new SQSClient({ The `endpoint_url` for Scaleway Messaging and Queuing with SQS is `https://sqs.mnq.fr-par.scaleway.com`. For the access and secret key values, use the credentials you [generated](/serverless/messaging/how-to/create-credentials/) for SQS. -Once connected to the SQS service, you can use any of the SDK's functions as long as they're [supported by Scaleway Messaging and Queuing](/serverless/messaging/reference-content/sqs-support/). Refer to AWS's [official documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/SQS.html/)) for more information, or read on to see some examples. +Once connected to the SQS service, you can use any of the SDK's functions as long as they're [supported by Scaleway Messaging and Queuing](/serverless/messaging/reference-content/sqs-support/). Refer to AWS's [official documentation](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/SQS.html) for more information, or read on to see some examples. ### Create queue (NodeJS) diff --git a/serverless/messaging/reference-content/sns-overview.mdx b/serverless/messaging/reference-content/sns-overview.mdx index 3df7cec33a..4851c4f5d1 100644 --- a/serverless/messaging/reference-content/sns-overview.mdx +++ b/serverless/messaging/reference-content/sns-overview.mdx @@ -54,7 +54,7 @@ All further actions such as sending messages etc. 
can be done using the AWS CLI The following resources may be helpful: - - [Official SNS Documentation](https://docs.aws.amazon.com/sns/latest/dg/welcome.html/) + - [Official SNS Documentation](https://docs.aws.amazon.com/sns/latest/dg/welcome.html) - Further information on supported API features: - [SNS](/serverless/messaging/reference-content/sns-support/) - [Creating a simulated CPU monitor notification system with Scaleway Messaging and Queuing SNS](/tutorials/sns-instances-notification-system/) diff --git a/serverless/messaging/reference-content/sqs-overview.mdx b/serverless/messaging/reference-content/sqs-overview.mdx index ff53e8b782..51176cd3f8 100644 --- a/serverless/messaging/reference-content/sqs-overview.mdx +++ b/serverless/messaging/reference-content/sqs-overview.mdx @@ -50,7 +50,7 @@ All further actions such as sending messages can be done using the AWS CLI or AW The following resources may be helpful: - [How to create a serverless scraping architecture, with Scaleway Messaging and Queuing SQS, Serverless Functions and Managed Database](/tutorials/create-serverless-scraping/) - - [Official SQS Documentation](https://docs.aws.amazon.com/sqs/index.html/) + - [Official SQS Documentation](https://docs.aws.amazon.com/sqs/index.html) - Further information on supported API features: - [SQS](/serverless/messaging/reference-content/sqs-support/) - [Official AWS CLI Documentation](https://aws.amazon.com/cli/) diff --git a/serverless/sql-databases/api-cli/import-data-to-serverless-sql-databases.mdx b/serverless/sql-databases/api-cli/import-data-to-serverless-sql-databases.mdx index ba735aed01..0d589313b0 100644 --- a/serverless/sql-databases/api-cli/import-data-to-serverless-sql-databases.mdx +++ b/serverless/sql-databases/api-cli/import-data-to-serverless-sql-databases.mdx @@ -36,7 +36,7 @@ The import procedure depends on your data source: #### Prerequisites -To complete this procedure, you must have installed PostgreSQL 16 (or newer) with 
[pg_dump](https://www.postgresql.org/docs/current/app-pgdump.html/) and [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html/) (bundled with the default PostgreSQL installation). +To complete this procedure, you must have installed PostgreSQL 16 (or newer) with [pg_dump](https://www.postgresql.org/docs/current/app-pgdump.html) and [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html) (bundled with the default PostgreSQL installation). #### Downloading and importing data into a Serverless SQL Database @@ -134,7 +134,7 @@ If the process fails and some data was already partly transferred, we suggest th To complete this procedure, you must have: -- Installed PostgreSQL 16 (or newer) with [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html/) (bundled with the default PostgreSQL installation). +- Installed PostgreSQL 16 (or newer) with [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html) (bundled with the default PostgreSQL installation). - A backup file for your database (named `my-backup` in the following procedure). @@ -158,7 +158,7 @@ To complete this procedure, you must have: To complete this procedure, you must have: -- Installed PostgreSQL 16 (or newer) with [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html/) (bundled with the default PostgreSQL installation). +- Installed PostgreSQL 16 (or newer) with [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html) (bundled with the default PostgreSQL installation). - A data file corresponding to a single table (named `my-table.csv` in the following procedure). @@ -199,7 +199,7 @@ You can create a `.csv` file from an existing PostgreSQL table with the [psql \c ``` - The PostgreSQL [COPY command](https://www.postgresql.org/docs/current/sql-copy.html/) cannot be used directly, as it requires the source file to be available on the PostgreSQL instance itself. 
+ The PostgreSQL [COPY command](https://www.postgresql.org/docs/current/sql-copy.html) cannot be used directly, as it requires the source file to be available on the PostgreSQL instance itself. 5. When finished, make sure your data is stored in your new database by [connecting to it](/serverless/sql-databases/how-to/connect-to-a-database/), and performing a query. diff --git a/serverless/sql-databases/api-cli/secure-connection-ssl-tls.mdx b/serverless/sql-databases/api-cli/secure-connection-ssl-tls.mdx index c6c5dc72e4..9501452ce8 100644 --- a/serverless/sql-databases/api-cli/secure-connection-ssl-tls.mdx +++ b/serverless/sql-databases/api-cli/secure-connection-ssl-tls.mdx @@ -51,13 +51,13 @@ Your full connection string should be the output of this command: echo "postgresql://{username}:{password}@{host}:{port}/{databasename}?sslmode=verify-ca&sslrootcert=$(echo ~/.postgresql/isrgx1root.pem)" ``` -Refer to the official [PostgreSQL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html/) for more information. +Refer to the official [PostgreSQL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html) for more information. ## Examples by SQL Drivers ### Python/psycopg2 -As [psycopg2](https://pypi.org/project/psycopg2/) uses [libpq](https://www.postgresql.org/docs/current/libpq.html/), the same official PostgreSQL parameter can be used. +As [psycopg2](https://pypi.org/project/psycopg2/) uses [libpq](https://www.postgresql.org/docs/current/libpq.html), the same official PostgreSQL parameter can be used. Edit your connection parameters to add `sslmode=verify-full` and `sslrootcert=system` as shown below: ```python @@ -176,7 +176,7 @@ db, err := sql.Open("postgres", connString) ### PHP/pgsql -As the default PostgreSQL driver bundled with PHP, [pgsql](https://www.php.net/manual/en/book.pgsql.php/) uses [libpq](https://www.postgresql.org/docs/current/libpq.html/). The same official PostgreSQL parameter can therefore be used. 
+As the default PostgreSQL driver bundled with PHP, [pgsql](https://www.php.net/manual/en/book.pgsql.php/) uses [libpq](https://www.postgresql.org/docs/current/libpq.html). The same official PostgreSQL parameter can therefore be used. To ensure SSL/TLS is enforced and the server certificate is valid, add `sslmode=verify-full` and `sslrootcert=system` to your connection parameters: @@ -259,7 +259,7 @@ fn main() { ### psql -As the official client bundled with PostgreSQL, [psql](https://www.postgresql.org/docs/current/app-psql.html/) supports the default PostgreSQL connections parameters. +As the official client bundled with PostgreSQL, [psql](https://www.postgresql.org/docs/current/app-psql.html) supports the default PostgreSQL connections parameters. Edit your connection parameters to add `sslmode=verify-full` and `sslrootcert=system` parameters: diff --git a/serverless/sql-databases/how-to/manage-backups.mdx b/serverless/sql-databases/how-to/manage-backups.mdx index 45dc65c6f9..8e986a0f11 100644 --- a/serverless/sql-databases/how-to/manage-backups.mdx +++ b/serverless/sql-databases/how-to/manage-backups.mdx @@ -67,7 +67,7 @@ Serverless SQL Databases are automatically backed up every day at the same time. 4. Click the **Download** button once your export is prepared to download your database backup in a `.pg_dump` format. - You can restore the downloaded backup using the [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html/) utility. + You can restore the downloaded backup using the [pg_restore](https://www.postgresql.org/docs/current/app-pgrestore.html) utility. 
diff --git a/serverless/sql-databases/quickstart.mdx b/serverless/sql-databases/quickstart.mdx index 3220881a10..9e651efc4c 100644 --- a/serverless/sql-databases/quickstart.mdx +++ b/serverless/sql-databases/quickstart.mdx @@ -21,7 +21,7 @@ This page explains how to create, access, and delete a Serverless SQL Database u - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- Installed the [psql CLI tool](https://www.postgresql.org/docs/14/app-psql.html/) (built-in with [PostgreSQL](https://www.postgresql.org/download/)). +- Installed the [psql CLI tool](https://www.postgresql.org/docs/14/app-psql.html) (built-in with [PostgreSQL](https://www.postgresql.org/download/)). ## How to create a database diff --git a/serverless/sql-databases/reference-content/serverless-sql-databases-overview.mdx b/serverless/sql-databases/reference-content/serverless-sql-databases-overview.mdx index 28b15a36cf..68434df3b3 100644 --- a/serverless/sql-databases/reference-content/serverless-sql-databases-overview.mdx +++ b/serverless/sql-databases/reference-content/serverless-sql-databases-overview.mdx @@ -91,5 +91,5 @@ The following actions must be performed by you directly: - SQL request size is limited to 512 KB. A single SQL query sent to a Serverless SQL Database cannot exceed 512 KB, but the response to the query is not limited and results tables can exceed 512 KB. -- The total size of prepared statements is limited to 512 KB per client connection. Refer to the [official PostgreSQ documentation](https://www.postgresql.org/docs/current/sql-prepare.html/) for more information on prepared statements. +- The total size of prepared statements is limited to 512 KB per client connection. 
Refer to the [official PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-prepare.html) for more information on prepared statements. diff --git a/serverless/sql-databases/troubleshooting/maximum-prepared-statements-reached.mdx b/serverless/sql-databases/troubleshooting/maximum-prepared-statements-reached.mdx index 488f9fed8b..acb1bc3f21 100644 --- a/serverless/sql-databases/troubleshooting/maximum-prepared-statements-reached.mdx +++ b/serverless/sql-databases/troubleshooting/maximum-prepared-statements-reached.mdx @@ -23,7 +23,7 @@ FATAL: failed to prepare statement: adding the prepared statement would exceed t ### Cause -The total size of [prepared statements](https://www.postgresql.org/docs/current/sql-prepare.html/) on Serverless SQL Databases is limited to 524288 bytes (512 kibibytes) for a single client connection. This limit can be reached for two reasons: +The total size of [prepared statements](https://www.postgresql.org/docs/current/sql-prepare.html) on Serverless SQL Databases is limited to 524288 bytes (512 kibibytes) for a single client connection. This limit can be reached for two reasons: - You (or the PostgreSQL client you are using) created too many prepared statements in a single PostgreSQL connection. @@ -31,7 +31,7 @@ The total size of [prepared statements](https://www.postgresql.org/docs/current/ ### Solution -- If you (or the PostgreSQL client you are using) created too many prepared statements in a single PostgreSQL connection, reduce the number of prepared statements, or use the [deallocate](https://www.postgresql.org/docs/current/sql-deallocate.html/) feature to remove prepared statements in an active session: +- If you (or the PostgreSQL client you are using) created too many prepared statements in a single PostgreSQL connection, reduce the number of prepared statements, or use the [deallocate](https://www.postgresql.org/docs/current/sql-deallocate.html) feature to remove prepared statements in an active session: 1. 
Execute the command below to list the prepared statements in your current session: ```sh @@ -47,5 +47,5 @@ The total size of [prepared statements](https://www.postgresql.org/docs/current/ - If you (or the PostgreSQL client you are using) created a single prepared statement that exceeds the maximum size, remove the query causing the issue, or split it into multiple statements. -This issue is usually caused by long single queries, exceeding thousands of characters, such as thousands of values in a single `INSERT` statement, or queries using [Large Objects](https://www.postgresql.org/docs/current/largeobjects.html/). +This issue is usually caused by long single queries, exceeding thousands of characters, such as thousands of values in a single `INSERT` statement, or queries using [Large Objects](https://www.postgresql.org/docs/current/largeobjects.html). \ No newline at end of file diff --git a/storage/object/api-cli/enable-sse-c.mdx b/storage/object/api-cli/enable-sse-c.mdx index a7af936bd7..ac8fe41c7b 100644 --- a/storage/object/api-cli/enable-sse-c.mdx +++ b/storage/object/api-cli/enable-sse-c.mdx @@ -143,5 +143,5 @@ The [AWS S3 CLI](https://awscli.amazonaws.com/v2/documentation/api/latest/refere ``` -Refer to the [official AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/cp.html/) for more information on the `aws s3 cp` command. +Refer to the [official AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/cp.html) for more information on the `aws s3 cp` command. diff --git a/storage/object/api-cli/generate-aws4-auth-signature.mdx b/storage/object/api-cli/generate-aws4-auth-signature.mdx index d6f503fba3..987a46b0de 100644 --- a/storage/object/api-cli/generate-aws4-auth-signature.mdx +++ b/storage/object/api-cli/generate-aws4-auth-signature.mdx @@ -21,7 +21,7 @@ Requests sent to the Object Storage API require an HTTP Authorization header. 
- [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization - Currently, the [AWS v4 signature type](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html/) is supported. + Currently, the [AWS v4 signature type](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) is supported. When using a client library such as [aws-cli](https://aws.amazon.com/cli/), [s3cmd](https://s3tools.org/s3cmd/) or [s3fs](https://github.com/s3fs-fuse/s3fs-fuse/), signatures are automatically generated by the library for you. diff --git a/storage/object/api-cli/installing-minio-client.mdx b/storage/object/api-cli/installing-minio-client.mdx index 37ca04b567..f7f979d4d5 100644 --- a/storage/object/api-cli/installing-minio-client.mdx +++ b/storage/object/api-cli/installing-minio-client.mdx @@ -14,7 +14,7 @@ categories: - object-storage --- -The [MinIO Client](https://min.io/docs/minio/linux/reference/minio-mc.html/) (`mc`) is a command-line tool that allows you to manage your s3 projects, providing a modern alternative to UNIX commands. +The [MinIO Client](https://min.io/docs/minio/linux/reference/minio-mc.html) (`mc`) is a command-line tool that allows you to manage your s3 projects, providing a modern alternative to UNIX commands. diff --git a/storage/object/api-cli/object-storage-aws-cli.mdx b/storage/object/api-cli/object-storage-aws-cli.mdx index 0f539d80a0..dfcdcb0532 100644 --- a/storage/object/api-cli/object-storage-aws-cli.mdx +++ b/storage/object/api-cli/object-storage-aws-cli.mdx @@ -14,7 +14,7 @@ categories: - object-storage --- -The AWS-CLI is an open-source tool built on top of the [AWS SDK for Python (Boto)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html/) that provides commands for interacting with AWS services. 
With minimal configuration, you can start using all the functionalities provided by AWS Management. +The AWS-CLI is an open-source tool built on top of the [AWS SDK for Python (Boto)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) that provides commands for interacting with AWS services. With minimal configuration, you can start using all the functionalities provided by AWS Management. @@ -23,7 +23,7 @@ The AWS-CLI is an open-source tool built on top of the [AWS SDK for Python (Boto - A valid [API key](/identity-and-access-management/iam/how-to/create-api-keys/) - This page uses AWS-CLI v1. If you want to follow the installation procedure for v2, see the [AWS-CLI documentation page](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html/). + This page uses AWS-CLI v1. If you want to follow the installation procedure for v2, see the [AWS-CLI documentation page](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). ## How to install the AWS-CLI diff --git a/storage/object/api-cli/post-object.mdx b/storage/object/api-cli/post-object.mdx index b4e472aa4b..3c5bec78e3 100644 --- a/storage/object/api-cli/post-object.mdx +++ b/storage/object/api-cli/post-object.mdx @@ -54,7 +54,7 @@ The POST policy always contains the expiration and conditions elements. ## Expiration -The expiration element specifies the expiration date and time of the POST policy in [ISO 8601 GMT](https://www.iso.org/iso-8601-date-and-time-format.html/) date format. For example, `2019-09-19T12:00:00.000Z` specifies that the POST policy is not valid after midnight GMT on September 19, 2019. +The expiration element specifies the expiration date and time of the POST policy in [ISO 8601 GMT](https://www.iso.org/iso-8601-date-and-time-format.html) date format. For example, `2019-09-19T12:00:00.000Z` specifies that the POST policy is not valid after midnight GMT on September 19, 2019. 
## Condition matching diff --git a/storage/object/troubleshooting/cannot-access-data.mdx b/storage/object/troubleshooting/cannot-access-data.mdx index 2a8e017c8b..6f98805ed5 100644 --- a/storage/object/troubleshooting/cannot-access-data.mdx +++ b/storage/object/troubleshooting/cannot-access-data.mdx @@ -30,7 +30,7 @@ I am experiencing issues while trying to access my buckets and objects stored on - [Rclone](https://rclone.org/docs/#logging/) - [S3cmd](https://s3tools.org/usage/) - [MinIO Client](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-logs.html#mc-admin-logs/) - - [AWS CLI](https://docs.aws.amazon.com/cli/latest/reference/logs/get-log-events.html/) + - [AWS CLI](https://docs.aws.amazon.com/cli/latest/reference/logs/get-log-events.html) - Make sure there is no [bucket policy](/storage/object/concepts/#bucket-policy) that prevents you from deleting the bucket. If there is one, make sure that it [explicitly allows you](/storage/object/api-cli/bucket-policy/) to perform the desired action. 
diff --git a/storage/object/troubleshooting/low-performance.mdx b/storage/object/troubleshooting/low-performance.mdx index 8eb5ce5698..09dd50a8d5 100644 --- a/storage/object/troubleshooting/low-performance.mdx +++ b/storage/object/troubleshooting/low-performance.mdx @@ -30,7 +30,7 @@ I am noticing decreased throughputs, timeouts, high latency, and overall instabi - [Rclone](https://rclone.org/docs/#logging/) - [S3cmd](https://s3tools.org/usage/) - [MinIO Client](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-logs.html#mc-admin-logs/) - - [AWS CLI](https://docs.aws.amazon.com/cli/latest/reference/logs/get-log-events.html/) + - [AWS CLI](https://docs.aws.amazon.com/cli/latest/reference/logs/get-log-events.html) ## Going further diff --git a/tutorials/add-disk-configure-datastore-esxi/index.mdx b/tutorials/add-disk-configure-datastore-esxi/index.mdx index a3167263c2..eaf5684aee 100644 --- a/tutorials/add-disk-configure-datastore-esxi/index.mdx +++ b/tutorials/add-disk-configure-datastore-esxi/index.mdx @@ -21,7 +21,7 @@ We suggest considering alternative hypervisors such as Proxmox. You can learn ho With [Dedibox](https://www.scaleway.com/en/dedibox/), you have the flexibility to order [additional disks](https://www.scaleway.com/en/dedibox/storage/) to increase the storage capacity of your machine. -These disks serve as invaluable resources, particularly when used as an additional Datastore within [ESXi](https://www.vmware.com/products/esxi-and-esx.html/) setups for storing virtual machines and related data. +These disks serve as invaluable resources, particularly when used as an additional Datastore within [ESXi](https://www.vmware.com/products/esxi-and-esx.html) setups for storing virtual machines and related data. Our offers include a diverse selection of SATA, SAS, and SSD disks, catering to various performance and storage needs. 
diff --git a/tutorials/ansible-bionic-beaver/index.mdx b/tutorials/ansible-bionic-beaver/index.mdx index fc8a2454e5..f0907b7deb 100644 --- a/tutorials/ansible-bionic-beaver/index.mdx +++ b/tutorials/ansible-bionic-beaver/index.mdx @@ -150,7 +150,7 @@ Next, we'll configure the Ansible server to connect to these hosts using Ansible ## Configuring Ansible Hosts -1. Ansible tracks of all the servers through an [inventory file](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html/). We need to set up this file first before we can communicate with our other computers. +1. Ansible tracks of all the servers through an [inventory file](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html). We need to set up this file first before we can communicate with our other computers. On your Ansible server, open the file @@ -205,7 +205,7 @@ The `all` means all hosts listed in the hosts file. However, it is also possible - specify an individual host: `ansible -m ping host1` - specify multiple hosts by separating them with colons: `ansible -m ping host1:host2` -For more information on Ansible commands or playbook, refer to the official [Ansible documentation](https://docs.ansible.com/ansible/2.4/ansible-playbook.html/). +For more information on Ansible commands or playbook, refer to the official [Ansible documentation](https://docs.ansible.com/ansible/2.4/ansible-playbook.html). ## Going further diff --git a/tutorials/ansible-galaxy/index.mdx b/tutorials/ansible-galaxy/index.mdx index 72cec2652f..45b8fc6e6e 100644 --- a/tutorials/ansible-galaxy/index.mdx +++ b/tutorials/ansible-galaxy/index.mdx @@ -60,7 +60,7 @@ The LAMP server is now created. Apache Solr is a fast open-source Java search server. Solr enables you to easily create search engines that search websites, databases, and files. - For more information on Solr, refer to the [Solr official documentation](http://lucene.apache.org/solr/news.html/). 
+ For more information on Solr, refer to the [Solr official documentation](http://lucene.apache.org/solr/news.html). 1. Install Java, tomcat6, Solr with an ansible-galaxy command. We will use the role of `geerlinguy` who is a software developer involved in many open-source development communities. Check [geerlinguy](https://galaxy.ansible.com/geerlingguy/) for many other roles. diff --git a/tutorials/arqbackup-pc-mac/index.mdx b/tutorials/arqbackup-pc-mac/index.mdx index cec822f496..8f79525098 100644 --- a/tutorials/arqbackup-pc-mac/index.mdx +++ b/tutorials/arqbackup-pc-mac/index.mdx @@ -26,7 +26,7 @@ In this tutorial you learn how to back up your PC or Mac computer to Object Stor ## Installing and configuring Arq Backup -1. Open your web browser and go to the [Arq website](https://www.arqbackup.com/index.html/) and download the application for your operating system. +1. Open your web browser and go to the [Arq website](https://www.arqbackup.com/index.html) and download the application for your operating system. 2. Double-click the downloaded file to open the setup tool. Follow the instructions given by the setup wizard to complete the installation of the application. 3. Start the Arq application once it is installed. Enter your license code or start the free trial: diff --git a/tutorials/automate-tasks-using-cron/index.mdx b/tutorials/automate-tasks-using-cron/index.mdx index c2fe0f21d2..e7da995b34 100644 --- a/tutorials/automate-tasks-using-cron/index.mdx +++ b/tutorials/automate-tasks-using-cron/index.mdx @@ -171,4 +171,4 @@ crontab -r -i When prompted, press `y` to confirm the deletion, or `n` to cancel the process. -For more information about cron jobs, refer to the [official documentation](https://manpages.ubuntu.com/manpages/focal/en/man8/cron.8.html/). \ No newline at end of file +For more information about cron jobs, refer to the [official documentation](https://manpages.ubuntu.com/manpages/focal/en/man8/cron.8.html). 
\ No newline at end of file diff --git a/tutorials/back-up-postgresql-barman/index.mdx b/tutorials/back-up-postgresql-barman/index.mdx index 56524b1c0e..f8f908e99e 100644 --- a/tutorials/back-up-postgresql-barman/index.mdx +++ b/tutorials/back-up-postgresql-barman/index.mdx @@ -100,7 +100,7 @@ During the installation of Barman on the server `pgsql-backup`, a system user ac ``` - The data format for the .pgpass file is `hostname:port:database:username:password`. If an asterisk is used in any of the first four fields, it will correspond to everything. The username refers to the PostgreSQL user that was created previously, not to the Linux user account. You may refer to the [official documentation](https://www.postgresql.org/docs/current/static/libpq-pgpass.html/) for more information about this file. + The data format for the .pgpass file is `hostname:port:database:username:password`. If an asterisk is used in any of the first four fields, it will correspond to everything. The username refers to the PostgreSQL user that was created previously, not to the Linux user account. You may refer to the [official documentation](https://www.postgresql.org/docs/current/static/libpq-pgpass.html) for more information about this file. diff --git a/tutorials/collecting-visualizing-logs-elastic-stack/index.mdx b/tutorials/collecting-visualizing-logs-elastic-stack/index.mdx index 4d36bbcf74..2c4aeedbcb 100644 --- a/tutorials/collecting-visualizing-logs-elastic-stack/index.mdx +++ b/tutorials/collecting-visualizing-logs-elastic-stack/index.mdx @@ -92,7 +92,7 @@ Its components are: It is important to secure your ELK Stack, especially if it is exposed to the public internet. You can complete your setup using the following additional resources: - [Use a firewal](/tutorials/installation-uncomplicated-firewall/) like `ufw` or `iptables` to restrict access to your Instance. 
-- [Secure Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-minimal-setup.html/) using its built-in security features or with plugins. +- [Secure Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-minimal-setup.html) using its built-in security features or with plugins. - Consider setting up an [HTTPS reverse proxy](/tutorials/nginx-reverse-proxy/) using a third-party web server like Nginx or Apache to access Kibana securely. ## Test the installation @@ -103,4 +103,4 @@ Make sure everything is working: - Kibana: Navigate to `http://your_server_ip:5601` in your web browser. Now, you should have a basic Elastic stack up and running! Adjust configurations as needed for your specific use case and further secure and optimize your setup for production use. -Refer to the [official Elastic documentation](https://www.elastic.co/guide/index.html/) for the most accurate and up-to-date instructions and advanced configuration information. \ No newline at end of file +Refer to the [official Elastic documentation](https://www.elastic.co/guide/index.html) for the most accurate and up-to-date instructions and advanced configuration information. \ No newline at end of file diff --git a/tutorials/configure-apache-kafka/index.mdx b/tutorials/configure-apache-kafka/index.mdx index 9903a1c03f..e10dfaec05 100644 --- a/tutorials/configure-apache-kafka/index.mdx +++ b/tutorials/configure-apache-kafka/index.mdx @@ -258,7 +258,7 @@ The line appears in the sink file as well as the console consumer. Different connectors for various applications exist already and are [available for download](https://www.confluent.io/product/connectors/). -If you need a specific connector for your application you can [develop one](https://docs.confluent.io/current/connect/devguide.html/) by yourself. 
+If you need a specific connector for your application you can [develop one](https://docs.confluent.io/current/connect/devguide.html) by yourself. Kafka provides various APIs to automatize many tasks. If you want to learn more about Kafka, feel free to check their [documentation](https://kafka.apache.org/documentation/). \ No newline at end of file diff --git a/tutorials/configure-graphite/index.mdx b/tutorials/configure-graphite/index.mdx index eb4f5ea561..1a5888f4c6 100644 --- a/tutorials/configure-graphite/index.mdx +++ b/tutorials/configure-graphite/index.mdx @@ -239,5 +239,5 @@ As the data was sent with a _count_ metric, Graphite adds up the values in the l - Pushing Content from a terminal is not the usual way to send data to Graphite. Instead, you will use a tool to automatize the collection of data. - - A complete list of tools that work with Graphite is available in the [official documentation](https://graphite.readthedocs.io/en/latest/tools.html/). + - A complete list of tools that work with Graphite is available in the [official documentation](https://graphite.readthedocs.io/en/latest/tools.html). \ No newline at end of file diff --git a/tutorials/configure-nagios-monitoring/index.mdx b/tutorials/configure-nagios-monitoring/index.mdx index a9f468af25..e2298542f2 100644 --- a/tutorials/configure-nagios-monitoring/index.mdx +++ b/tutorials/configure-nagios-monitoring/index.mdx @@ -309,4 +309,4 @@ As this information is not broadcasted on the Internet, an agent has to run on t -Nagios is widely used because of its flexibility and versatility. Do not hesitate to refer to the official [documentation](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/index.html/) of the software to find more out about further configuration options. \ No newline at end of file +Nagios is widely used because of its flexibility and versatility. 
Do not hesitate to refer to the official [documentation](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/index.html) of the software to find more out about further configuration options. \ No newline at end of file diff --git a/tutorials/configure-netbox-managed-postgresql-database/index.mdx b/tutorials/configure-netbox-managed-postgresql-database/index.mdx index 8ebec6f46d..50ff2c1dde 100644 --- a/tutorials/configure-netbox-managed-postgresql-database/index.mdx +++ b/tutorials/configure-netbox-managed-postgresql-database/index.mdx @@ -163,14 +163,14 @@ Enter the `username`, `email`, `password`, and `password confirmation` for the u ## Configuring gunicorn -[gunicorn](https://gunicorn.org/) is a Python [WSGI](https://wsgi.readthedocs.io/en/latest/what.html/) HTTP Server for UNIX which will be used to serve the NetBox application to Nginx. +[gunicorn](https://gunicorn.org/) is a Python [WSGI](https://wsgi.readthedocs.io/en/latest/what.html) HTTP Server for UNIX which will be used to serve the NetBox application to Nginx. 1. Copy the gunicorn configuration file from the `contrib` directory to its final destination: ``` cp /opt/netbox/contrib/gunicorn.py /opt/netbox/gunicorn_config.py ``` -The configuration file shipped with the NetBox application works for most setups, however, if you need some specific settings, refer to the [gunicorn documentation](https://docs.gunicorn.org/en/stable/configure.html/). +The configuration file shipped with the NetBox application works for most setups, however, if you need some specific settings, refer to the [gunicorn documentation](https://docs.gunicorn.org/en/stable/configure.html). 
## Configuring systemd diff --git a/tutorials/configure-nodemcu-iot-hub/index.mdx b/tutorials/configure-nodemcu-iot-hub/index.mdx index b6b7f10d46..7724fed2d4 100644 --- a/tutorials/configure-nodemcu-iot-hub/index.mdx +++ b/tutorials/configure-nodemcu-iot-hub/index.mdx @@ -15,7 +15,7 @@ hero: assets/scaleway_nodemcu.webp ## NodeMCU - Overview -Scaleway's IoT Hub lets your connected devices share messages. In this tutorial, we use a [NodeMCU](https://www.nodemcu.com/index_en.html/) embedded Microcontroller ([ESP8266](https://en.wikipedia.org/wiki/ESP8266/)) with WiFi, and a DHT11 temperature sensor, to collect temperature and humidity information. We use the Arduino IDE to configure the microcontroller and transfer the data via WiFi to the IoT Hub. The data is then retrieved from the IoT Hub by [Node-RED](https://nodered.org/) to allow visualization in a dashboard. +Scaleway's IoT Hub lets your connected devices share messages. In this tutorial, we use a [NodeMCU](https://www.nodemcu.com/index_en.html) embedded Microcontroller ([ESP8266](https://en.wikipedia.org/wiki/ESP8266/)) with WiFi, and a DHT11 temperature sensor, to collect temperature and humidity information. We use the Arduino IDE to configure the microcontroller and transfer the data via WiFi to the IoT Hub. The data is then retrieved from the IoT Hub by [Node-RED](https://nodered.org/) to allow visualization in a dashboard. 
diff --git a/tutorials/configure-smtp-relay-tem/index.mdx b/tutorials/configure-smtp-relay-tem/index.mdx index b69dd58024..da4dde5502 100644 --- a/tutorials/configure-smtp-relay-tem/index.mdx +++ b/tutorials/configure-smtp-relay-tem/index.mdx @@ -27,7 +27,7 @@ For demonstration purposes, will walk through the configuration process on a Mac - A valid [API key](/identity-and-access-management/iam/how-to/create-api-keys/) with the right [permissions](/identity-and-access-management/iam/reference-content/permission-sets/) - [Configured your domain with Transactional Email](/managed-services/transactional-email/how-to/configure-domain-with-transactional-email/) - Installed [Postfix](https://ubuntu.com/server/docs/mail-postfix/) on your local machine -- Installed [stunnel](https://www.stunnel.org/downloads.html/) on your local machine +- Installed [stunnel](https://www.stunnel.org/downloads.html) on your local machine - Opened [the mail ports on your Instance](/compute/instances/how-to/send-emails-from-your-instance/) - Configured your SMTP server diff --git a/tutorials/configure-virtual-machine-esxi/index.mdx b/tutorials/configure-virtual-machine-esxi/index.mdx index de2b5996c7..ac354dd046 100644 --- a/tutorials/configure-virtual-machine-esxi/index.mdx +++ b/tutorials/configure-virtual-machine-esxi/index.mdx @@ -26,7 +26,7 @@ Management of the virtual machines can be done through an easy-to-use web interf - A Dedibox account logged into the [console](https://console.online.net/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- A [Dedibox](http://www.scaleway.com/en/dedibox/) server that fulfills the [VMware hardware requirements](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.esxi.upgrade.doc/GUID-DEB8086A-306B-4239-BF76-E354679202FC.html/) +- A [Dedibox](http://www.scaleway.com/en/dedibox/) server 
that fulfills the [VMware hardware requirements](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.esxi.upgrade.doc/GUID-DEB8086A-306B-4239-BF76-E354679202FC.html) - A [valid license](https://my.vmware.com/en/group/vmware/evalcenter?p=free-esxi7&lp=default/) for ESXi (a 60-day free trial is available) - At least one [failover IP](/dedibox-network/ip-failover/concepts/#failover-ips) with a [virtual MAC address](/dedibox-network/ip-failover/concepts/#virtual-mac-address) @@ -48,7 +48,7 @@ There are two ways to install EXSi on a Dedibox server: through the [Dedibox con Your server can also be booted from an ESXi ISO file through a KVM over IP device if included in your Dedibox. This type of installation gives you the possibility of installing your own licenses or remote ISO files. -ESXi ISO images are available on VMware's ["Evaluate Products"](https://www.vmware.com/try-vmware.html/) page. +ESXi ISO images are available on VMware's ["Evaluate Products"](https://www.vmware.com/try-vmware.html) page. 1. To begin installation via KVM over IP, access the server page of your Dedibox on the console and click **Install**. 2. Click **KVM over IP**: diff --git a/tutorials/create-esxi-cluster-dedibox/index.mdx b/tutorials/create-esxi-cluster-dedibox/index.mdx index 8eb92cca19..378fd4515b 100644 --- a/tutorials/create-esxi-cluster-dedibox/index.mdx +++ b/tutorials/create-esxi-cluster-dedibox/index.mdx @@ -294,7 +294,7 @@ A cluster is a group of several machines. When a host is added to a cluster, the ## Migrating Virtual Machines -vCenter Server supports the [vMotion technology](https://www.vmware.com/products/vsphere/vmotion.html/) for live migration of virtual machines without any downtime. This allows you to: +vCenter Server supports the [vMotion technology](https://www.vmware.com/products/vsphere/vmotion.html) for live migration of virtual machines without any downtime. This allows you to: - Automatically optimize virtual machines within resource pools. 
- Perform hardware maintenance without scheduling downtime or disrupting business operations. @@ -326,4 +326,4 @@ The virtual machine retains its network identity and connections, ensuring a sea ## Conclusion -You have now completed the setup of a two-node ESXi cluster with RPN-SAN as a shared datastore. You got the basics of managing it using vCenter Server and vSphere and were able to move your VM between the two nodes using the vMotion feature over the RPN. For more information about VMware vSphere, refer to the [official documentation](https://docs.vmware.com/en/VMware-vSphere/index.html/). For more information about our Scaleway Dedibox dedicated servers, refer to our [product information](https://www.scaleway.com/en/dedibox/) or contact our [solution advisors](https://console.online.net/en/assistance/commercial/) by phone or ticket to find the ideal solution for your needs. \ No newline at end of file +You have now completed the setup of a two-node ESXi cluster with RPN-SAN as a shared datastore. You got the basics of managing it using vCenter Server and vSphere and were able to move your VM between the two nodes using the vMotion feature over the RPN. For more information about VMware vSphere, refer to the [official documentation](https://docs.vmware.com/en/VMware-vSphere/index.html). For more information about our Scaleway Dedibox dedicated servers, refer to our [product information](https://www.scaleway.com/en/dedibox/) or contact our [solution advisors](https://console.online.net/en/assistance/commercial/) by phone or ticket to find the ideal solution for your needs. 
\ No newline at end of file diff --git a/tutorials/deploy-hasura-engine-database-postgresql/index.mdx b/tutorials/deploy-hasura-engine-database-postgresql/index.mdx index c29e0a2e75..13073192d9 100644 --- a/tutorials/deploy-hasura-engine-database-postgresql/index.mdx +++ b/tutorials/deploy-hasura-engine-database-postgresql/index.mdx @@ -152,4 +152,4 @@ Hasura provides a pre-configured [repository](https://github.com/hasura/graphql- You have now deployed Hasura GraphQL Engine using Docker, connected it to a Scaleway [Database for PostgreSQL](https://www.scaleway.com/en/database/), and taken your first steps with GraphQL. -To learn more about Hasura GraphQL Engine, refer to the [official documentation](https://hasura.io/docs/1.0/graphql/manual/api-reference/graphql-api/query.html/). \ No newline at end of file +To learn more about Hasura GraphQL Engine, refer to the [official documentation](https://hasura.io/docs/1.0/graphql/manual/api-reference/graphql-api/query.html). \ No newline at end of file diff --git a/tutorials/deploy-instances-packer-terraform/index.mdx b/tutorials/deploy-instances-packer-terraform/index.mdx index 71605ba1e4..3ebd1ac937 100644 --- a/tutorials/deploy-instances-packer-terraform/index.mdx +++ b/tutorials/deploy-instances-packer-terraform/index.mdx @@ -31,8 +31,8 @@ Both applications are available for Linux, macOS, Windows, FreeBSD, and NetBSD. ## Downloading and Installing Packer -1. [Download Packer](https://www.packer.io/downloads.html/) for your operating system. If you want to define a specific configuration, you can also [build the application](https://www.packer.io/intro/getting-started/install.html#compiling-from-source/) from the source. -2. Install the application on your computer following the instructions in the Packer [downloads page](https://www.packer.io/downloads.html/). +1. [Download Packer](https://www.packer.io/downloads.html) for your operating system. 
If you want to define a specific configuration, you can also [build the application](https://www.packer.io/intro/getting-started/install.html#compiling-from-source/) from the source. +2. Install the application on your computer following the instructions in the Packer [downloads page](https://www.packer.io/downloads.html). 3. Verify that the application is working properly, by opening a terminal and typing `packer --help`. You see the following output: ```bash packer --help @@ -225,7 +225,7 @@ As exemplified below, we will build an Ubuntu Image with Apache preinstalled: ## Deploying Machine Images with Terraform -1. Download [Terraform](https://www.terraform.io/downloads.html/) for your operating system. +1. Download [Terraform](https://www.terraform.io/downloads.html) for your operating system. 2. Set your credentials as an environment variable on your computer. For example in bash, you have to edit the file `~/.bashrc`, for zsh the procedure is the same, but the file to edit is `~/.zshrc`: ```bash export SCW_DEFAULT_PROJECT_ID= @@ -344,4 +344,4 @@ As exemplified below, we will build an Ubuntu Image with Apache preinstalled: Apply complete! Resources: 4 added, 0 changed, 0 destroyed. ``` -To find out more about all the resources that you can manage with Terraform, check out the [official documentation](https://registry.terraform.io/providers/scaleway/scaleway/latest/docs/). For more information about Packer, refer to the official [Packer documentation](https://www.packer.io/docs/builders/scaleway.html/). \ No newline at end of file +To find out more about all the resources that you can manage with Terraform, check out the [official documentation](https://registry.terraform.io/providers/scaleway/scaleway/latest/docs/). For more information about Packer, refer to the official [Packer documentation](https://www.packer.io/docs/builders/scaleway.html). 
\ No newline at end of file diff --git a/tutorials/get-started-crossplane-kubernetes/index.mdx b/tutorials/get-started-crossplane-kubernetes/index.mdx index d01952f6d7..dc5528ee7b 100644 --- a/tutorials/get-started-crossplane-kubernetes/index.mdx +++ b/tutorials/get-started-crossplane-kubernetes/index.mdx @@ -23,7 +23,7 @@ In this tutorial, you will learn how to install Upbound Universal Crossplane (UX - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization - A valid [API key](/identity-and-access-management/iam/how-to/create-api-keys/) - A [Kubernetes cluster](/containers/kubernetes/how-to/create-cluster/) and downloaded its [Kubeconfig file](/containers/kubernetes/concepts#kubeconfig) -- Installed [curl](https://curl.se/download.html/) +- Installed [curl](https://curl.se/download.html) - Installed [kubectl](https://github.com/kubernetes/kops/blob/master/docs/install.md/) on your local computer diff --git a/tutorials/hadoop/index.mdx b/tutorials/hadoop/index.mdx index a52ba99579..7a0309b96a 100644 --- a/tutorials/hadoop/index.mdx +++ b/tutorials/hadoop/index.mdx @@ -64,7 +64,7 @@ For more information, refer to the [official Apache Hadoop documentation.](http: ## Installing Hadoop in standalone mode -1. Visit the [Apache Hadoop Releases page](http://hadoop.apache.org/releases.html/) to select the most recent stable release. We will install Hadoop version 3.4.0. +1. Visit the [Apache Hadoop Releases page](http://hadoop.apache.org/releases.html) to select the most recent stable release. We will install Hadoop version 3.4.0. 2. Copy the link to the release binary. 3. 
Use `wget` to fetch it: diff --git a/tutorials/hestiacp/index.mdx b/tutorials/hestiacp/index.mdx index fa6147769b..b6b2c74c6c 100644 --- a/tutorials/hestiacp/index.mdx +++ b/tutorials/hestiacp/index.mdx @@ -147,4 +147,4 @@ You can create additional user accounts to allow your users to manage their serv ## Going further -Hestia control panel provides also features to manage databases, cronjobs, and backups. You may refer to the [official documentation](https://hestiacp.com/docs/) for more information on how to manage these features. You will also find information on how to manage your server using the command-line tools included with Hestia control panel in the [CLI reference documentation](https://hestiacp.com/docs/reference/cli.html/). \ No newline at end of file +Hestia control panel provides also features to manage databases, cronjobs, and backups. You may refer to the [official documentation](https://hestiacp.com/docs/) for more information on how to manage these features. You will also find information on how to manage your server using the command-line tools included with Hestia control panel in the [CLI reference documentation](https://hestiacp.com/docs/reference/cli.html). \ No newline at end of file diff --git a/tutorials/install-cockroachdb-scaleway-instances/index.mdx b/tutorials/install-cockroachdb-scaleway-instances/index.mdx index d5dcce63f8..992ba60129 100644 --- a/tutorials/install-cockroachdb-scaleway-instances/index.mdx +++ b/tutorials/install-cockroachdb-scaleway-instances/index.mdx @@ -258,7 +258,7 @@ Locally, you will need to [create the following certificates and keys](https://w - A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP addresses and common names for machines running load balancers. - A client key pair for the `root` user. You will use this to run a sample workload against the cluster as well as some `cockroach` client commands from your local machine. -1. 
[Install CockroachDB](https://www.cockroachlabs.com/docs/install-cockroachdb.html/) on your local machine. +1. [Install CockroachDB](https://www.cockroachlabs.com/docs/install-cockroachdb.html) on your local machine. 2. Create two directories. ``` mkdir certs @@ -450,7 +450,7 @@ Use the built-in SQL client locally as follows: Despite CockroachDB's various [built-in safeguards against failure](https://www.cockroachlabs.com/docs/v23.2/frequently-asked-questions#how-does-cockroachdb-survive-failures/), it is critical to actively monitor the overall health and performance of a cluster running in production and to create alerting rules that promptly send notifications when there are events that require investigation or intervention. -You can leverage [Scaleway Cockpit](/observability/cockpit/how-to/send-metrics-logs-to-cockpit/) to set up monitoring and alerting using [CockroachDB Prometheus endpoint](https://www.cockroachlabs.com/docs/stable/monitor-cockroachdb-with-prometheus.html/) via Prometheus Remote Write capabilities. This can be done by installing Prometheus as an agent on each node. +You can leverage [Scaleway Cockpit](/observability/cockpit/how-to/send-metrics-logs-to-cockpit/) to set up monitoring and alerting using [CockroachDB Prometheus endpoint](https://www.cockroachlabs.com/docs/stable/monitor-cockroachdb-with-prometheus.html) via Prometheus Remote Write capabilities. This can be done by installing Prometheus as an agent on each node. For details about available monitoring options and the most important events and metrics to alert on, see [Monitoring and Alerting(https://www.cockroachlabs.com/docs/v23.2/monitoring-and-alerting/). 
diff --git a/tutorials/install-configure-couchdb/index.mdx b/tutorials/install-configure-couchdb/index.mdx index 436fc227d2..a943698c84 100644 --- a/tutorials/install-configure-couchdb/index.mdx +++ b/tutorials/install-configure-couchdb/index.mdx @@ -273,7 +273,7 @@ It will return a JSON list like the following: ``` - To learn more about managing your CouchDB via the API, check out the [official documentation](https://docs.couchdb.org/en/latest/api/index.html/). + To learn more about managing your CouchDB via the API, check out the [official documentation](https://docs.couchdb.org/en/latest/api/index.html). diff --git a/tutorials/install-php-composer/index.mdx b/tutorials/install-php-composer/index.mdx index 376dded4ae..7b35e71e2b 100644 --- a/tutorials/install-php-composer/index.mdx +++ b/tutorials/install-php-composer/index.mdx @@ -35,7 +35,7 @@ PHP Composer is a package dependency management tool for PHP similar to NPM for ``` php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" ``` -3. Verify the data integrity of the script by comparing the script SHA-384 hash with the latest installer hash found on the Composer [Public Keys/Signatures page](https://composer.github.io/pubkeys.html/). Download the expected signature of the latest Composer installer from the Composer's GitHub page and store it in a variable named `HASH`. +3. Verify the data integrity of the script by comparing the script SHA-384 hash with the latest installer hash found on the Composer [Public Keys/Signatures page](https://composer.github.io/pubkeys.html). Download the expected signature of the latest Composer installer from the Composer's GitHub page and store it in a variable named `HASH`. 
``` HASH="$(wget -q -O - https://composer.github.io/installer.sig)" ``` diff --git a/tutorials/install-postgresql/index.mdx b/tutorials/install-postgresql/index.mdx index 80900089fb..0f65177ec9 100644 --- a/tutorials/install-postgresql/index.mdx +++ b/tutorials/install-postgresql/index.mdx @@ -97,7 +97,7 @@ By default, only the **Postgres** role is configured within the database. Shall the new role be allowed to create more new roles? (y/n) n ``` -More information about the usage of the `createuser` command is available in the [official documentation](https://www.postgresql.org/docs/11/static/app-createuser.html/). +More information about the usage of the `createuser` command is available in the [official documentation](https://www.postgresql.org/docs/11/static/app-createuser.html). ## Creating new databases @@ -111,7 +111,7 @@ Run the following command, after you log in as the Postgres user: createdb bill ``` -For more information, you consult the [official documentation](https://www.postgresql.org/docs/11/static/app-createdb.html/). +For more information, you consult the [official documentation](https://www.postgresql.org/docs/11/static/app-createdb.html). ## Connecting to a Postgres prompt with the new role diff --git a/tutorials/k8s-gitlab/index.mdx b/tutorials/k8s-gitlab/index.mdx index 50c5b0db17..cc66953d15 100644 --- a/tutorials/k8s-gitlab/index.mdx +++ b/tutorials/k8s-gitlab/index.mdx @@ -163,4 +163,4 @@ To demonstrate that the runner is working, we [create a repository](https://docs -If you want to learn more about running a `gitlab` runner on Kubernetes you can also check the `gitlab-ci` official [documentation](https://docs.gitlab.com/runner/install/kubernetes.html/). \ No newline at end of file +If you want to learn more about running a `gitlab` runner on Kubernetes you can also check the `gitlab-ci` official [documentation](https://docs.gitlab.com/runner/install/kubernetes.html). 
\ No newline at end of file diff --git a/tutorials/lb-firewall-haproxy-pfsense/index.mdx b/tutorials/lb-firewall-haproxy-pfsense/index.mdx index 8df2597f27..9ed825fd62 100644 --- a/tutorials/lb-firewall-haproxy-pfsense/index.mdx +++ b/tutorials/lb-firewall-haproxy-pfsense/index.mdx @@ -571,5 +571,5 @@ By default, pfSense provides only support for firewalling and VPN features. To s You have now configured a redundant load balancer and firewall infrastructure using pfSense and HAproxy on [Scaleway Dedibox dedicated servers](https://www.scaleway.com/en/dedibox/). To go further with the configuration of pfSense and HAproxy, refer to their official documentation: -- [Official pfSense documentation](https://docs.netgate.com/pfsense/en/latest/index.html/) +- [Official pfSense documentation](https://docs.netgate.com/pfsense/en/latest/index.html) - [Official HAproxy documentation](http://www.haproxy.org/#docs/) \ No newline at end of file diff --git a/tutorials/librenms-monitoring/index.mdx b/tutorials/librenms-monitoring/index.mdx index 59bfd44b2f..c3fc2ea707 100644 --- a/tutorials/librenms-monitoring/index.mdx +++ b/tutorials/librenms-monitoring/index.mdx @@ -284,4 +284,4 @@ LibreNMS is capable of monitoring many network devices. To monitor a remote clou 10. LibreNMS starts polling the device and creating different graphs showing the status of the device: -For more information about the configuration options of SNMP, refer to the [official documentation](https://manpages.ubuntu.com/manpages/lunar/en/man5/snmpd.conf.5.html/). You may also refer to the [official LibreNMS documentation](https://docs.librenms.org/). \ No newline at end of file +For more information about the configuration options of SNMP, refer to the [official documentation](https://manpages.ubuntu.com/manpages/lunar/en/man5/snmpd.conf.5.html). You may also refer to the [official LibreNMS documentation](https://docs.librenms.org/). 
\ No newline at end of file diff --git a/tutorials/magento-ubuntu/index.mdx b/tutorials/magento-ubuntu/index.mdx index 255288c56b..356140e9cc 100644 --- a/tutorials/magento-ubuntu/index.mdx +++ b/tutorials/magento-ubuntu/index.mdx @@ -302,7 +302,7 @@ By default, plain HTTP connections are not encrypted and data is transmitted in The Magento admin interface allows you to manage your customers and products, and also customize the pages of your shop. - For more information about the admin interface, refer to the official [Magento documentation](https://docs.magento.com/user-guide/configuration/configuration-basic.html/). + For more information about the admin interface, refer to the official [Magento documentation](https://docs.magento.com/user-guide/configuration/configuration-basic.html). The interface is available on a customized URL, which was displayed during the installation of your shop. diff --git a/tutorials/manage-database-instance-pgadmin4/index.mdx b/tutorials/manage-database-instance-pgadmin4/index.mdx index d60a69f683..e4d63a3ea2 100644 --- a/tutorials/manage-database-instance-pgadmin4/index.mdx +++ b/tutorials/manage-database-instance-pgadmin4/index.mdx @@ -108,4 +108,4 @@ Once installed, proceed to the configuration of pgAdmin with your Scaleway Datab ## Conclusion -You can now manage your Database Instance from a visual interface. For more information using pgAdmin, you may follow the [official documentation](https://www.pgadmin.org/docs/pgadmin4/development/release_notes_4_16.html/). \ No newline at end of file +You can now manage your Database Instance from a visual interface. For more information using pgAdmin, you may follow the [official documentation](https://www.pgadmin.org/docs/pgadmin4/development/release_notes_4_16.html). 
\ No newline at end of file diff --git a/tutorials/migrate-data-minio-client/index.mdx b/tutorials/migrate-data-minio-client/index.mdx index 5a8f1ee450..528bff91af 100644 --- a/tutorials/migrate-data-minio-client/index.mdx +++ b/tutorials/migrate-data-minio-client/index.mdx @@ -85,4 +85,4 @@ MinIO client provides a modern alternative to UNIX commands like ls, cat, cp, mi -For more information about MinIO client refer to the [official documentation](https://docs.minio.io/docs/minio-client-quickstart-guide.html/). \ No newline at end of file +For more information about MinIO client refer to the [official documentation](https://docs.minio.io/docs/minio-client-quickstart-guide.html). \ No newline at end of file diff --git a/tutorials/migrate-mysql-databases-postgresql-pgloader/index.mdx b/tutorials/migrate-mysql-databases-postgresql-pgloader/index.mdx index fcbfb7b04c..3efb7a3c7f 100644 --- a/tutorials/migrate-mysql-databases-postgresql-pgloader/index.mdx +++ b/tutorials/migrate-mysql-databases-postgresql-pgloader/index.mdx @@ -16,7 +16,7 @@ dates: pgLoader is an open-source database migration tool developed to simplify the process of migrating an existing database from one database engine to [PostgreSQL](https://www.postgresql.org/). -The tool supports migrations from several file types and database engines like [MySQL](https://www.mysql.com/), [MS SQL](https://www.microsoft.com/en-us/sql-server/sql-server-2019/) and [SQLite](https://www.sqlite.org/index.html/). +The tool supports migrations from several file types and database engines like [MySQL](https://www.mysql.com/), [MS SQL](https://www.microsoft.com/en-us/sql-server/sql-server-2019/) and [SQLite](https://www.sqlite.org/index.html). In this tutorial, you learn how to migrate an existing remote MySQL database to a [Database for PostgreSQL](https://www.scaleway.com/en/database/) using pgLoader and an intermediate [Development Instance](https://www.scaleway.com/en/cost-optimized-instances/) running Ubuntu Linux. 
diff --git a/tutorials/mlx-array-framework-apple-silicon/index.mdx b/tutorials/mlx-array-framework-apple-silicon/index.mdx index def97938c4..24efddb4e7 100644 --- a/tutorials/mlx-array-framework-apple-silicon/index.mdx +++ b/tutorials/mlx-array-framework-apple-silicon/index.mdx @@ -96,7 +96,7 @@ Efficiently compute both a function's output and gradient concerning the functio ## Going further -- For comprehensive MLX documentation, refer to the [official MLX documentation](https://ml-explore.github.io/mlx/build/html/index.html/). +- For comprehensive MLX documentation, refer to the [official MLX documentation](https://ml-explore.github.io/mlx/build/html/index.html). - The [MLX examples repository](https://github.com/ml-explore/mlx-examples/) hosts a diverse collection of examples, including: - Training a [Transformer language model](https://github.com/ml-explore/mlx-examples/tree/main/transformer_lm/). - Large-scale text generation using [LLaMA](https://github.com/ml-explore/mlx-examples/tree/main/llms/llama/) and subsequent finetuning with [LoRA](https://github.com/ml-explore/mlx-examples/tree/main/lora/). diff --git a/tutorials/nginx-reverse-proxy/index.mdx b/tutorials/nginx-reverse-proxy/index.mdx index 0b574bdaee..55766dd943 100644 --- a/tutorials/nginx-reverse-proxy/index.mdx +++ b/tutorials/nginx-reverse-proxy/index.mdx @@ -30,7 +30,7 @@ There are significant benefits to setting up an Nginx HTTPS reverse proxy: - An [SSH key](/identity-and-access-management/organizations-and-projects/how-to/create-ssh-key/) - An [Instance](/compute/instances/how-to/create-an-instance/) running on Ubuntu Bionic Beaver - A web application running on a non-standard web port on the Instance - You can, for example, install a lightweight web server like [Webfsd](http://manpages.ubuntu.com/manpages/trusty/man1/webfsd.1.html/), which runs on port 8000 by default to be reachable on the standard HTTP(s) ports via the proxy. 
+ You can, for example, install a lightweight web server like [Webfsd](http://manpages.ubuntu.com/manpages/trusty/man1/webfsd.1.html), which runs on port 8000 by default to be reachable on the standard HTTP(s) ports via the proxy. - [Installed and configured Apache](/tutorials/configure-apache-lets-encrypt/#installing-apache) on your Instance - `sudo` privileges or access to the root user - A domain name pointing towards your server's IP (A or AAAA record) diff --git a/tutorials/nvidia-triton/index.mdx b/tutorials/nvidia-triton/index.mdx index 1caf449637..71a325000f 100644 --- a/tutorials/nvidia-triton/index.mdx +++ b/tutorials/nvidia-triton/index.mdx @@ -123,4 +123,4 @@ Your metrics are now pushed to Cockpit and you can [access Grafana](/observabili You have successfully deployed a machine learning model using NVIDIA Triton Inference Server on Scaleway Object Storage and set up metrics monitoring using the monitoring Cockpit. This tutorial highlights how Scaleway's Object Storage and AI capabilities can be combined to build and deploy powerful AI applications. -Remember that this tutorial provides just a basic overview. Refer to the official [Triton Inference Server](https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html/) documentation for more advanced configurations, security settings, and additional features to configure the application to your specific needs. \ No newline at end of file +Remember that this tutorial provides just a basic overview. Refer to the official [Triton Inference Server](https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html) documentation for more advanced configurations, security settings, and additional features to configure the application to your specific needs. 
\ No newline at end of file diff --git a/tutorials/odoo/index.mdx b/tutorials/odoo/index.mdx index 4b276c43d2..e9475f0d26 100644 --- a/tutorials/odoo/index.mdx +++ b/tutorials/odoo/index.mdx @@ -16,7 +16,7 @@ dates: posted: 2020-08-13 --- -Odoo is a business management software, including different modules such as CRM, billing, e-commerce, warehouse, website builder, inventory management, and accounting. The software is distributed in an open-code model, providing a free Community Edition, released under the GNU [LGPLv3](https://www.gnu.org/licenses/lgpl-3.0.en.html/) software license. The source code of the Community Edition is available on [the company's GitHub](https://github.com/odoo/odoo/). Besides the free and open-source Community Edition, a proprietary "Enterprise" version provides additional features and services. +Odoo is a business management software, including different modules such as CRM, billing, e-commerce, warehouse, website builder, inventory management, and accounting. The software is distributed in an open-code model, providing a free Community Edition, released under the GNU [LGPLv3](https://www.gnu.org/licenses/lgpl-3.0.en.html) software license. The source code of the Community Edition is available on [the company's GitHub](https://github.com/odoo/odoo/). Besides the free and open-source Community Edition, a proprietary "Enterprise" version provides additional features and services. The modular design of Odoo allows developers to create additional modules or apps and distribute them through the Odoo marketplace, which provides more than 20,000 modules. This makes Odoo a versatile and customizable solution that can be adapted to many usage scenarios. @@ -182,4 +182,4 @@ By default, the Odoo web server uses a plain HTTP connection to transport data. You can now access your Odoo via a secure HTTPS connection at `https://odoo.example.com`. 
-To go further, refer to the [official Odoo documentation](https://www.odoo.com/documentation/master/applications.html/). \ No newline at end of file +To go further, refer to the [official Odoo documentation](https://www.odoo.com/documentation/master/applications.html). \ No newline at end of file diff --git a/tutorials/proxmox-softraid/index.mdx b/tutorials/proxmox-softraid/index.mdx index 32110f59f6..c78f8a9db3 100644 --- a/tutorials/proxmox-softraid/index.mdx +++ b/tutorials/proxmox-softraid/index.mdx @@ -17,7 +17,7 @@ dates: Proxmox Virtual Environment - or Proxmox VE is a server virtualization platform, based on Debian Linux. It allows the deployment of management of [KVM-based](https://www.linux-kvm.org/page/Main_Page/) virtual machines or [LXC](https://linuxcontainers.org/) containers. -The tool provides a REST-API as well as a graphical web interface to manage the virtual machines and is licensed under the [GNU Affero General Public License, version 3](https://www.gnu.org/licenses/agpl-3.0.de.html/). +The tool provides a REST-API as well as a graphical web interface to manage the virtual machines and is licensed under the [GNU Affero General Public License, version 3](https://www.gnu.org/licenses/agpl-3.0.de.html). @@ -30,7 +30,7 @@ The tool provides a REST-API as well as a graphical web interface to manage the 1. Create a [KVM-over-IP](/dedibox/kvm-over-ip/quickstart/) session from your [Dedibox console](https://console.online.net/). 2. Mount the Proxmox ISO, available on the server [virtualmedia.online.net](http://virtualmedia.online.net/). The mounting procedure may differ slightly depending on the KVM module of your Dedibox. The instructions for Proxmox VE 6 can be found [here](https://virtualmedia.online.net/proxmox/proxmox-ve_6.0-1.iso/). 3. Boot the server from the ISO file by choosing the virtual CD-ROM drive in the boot options. -4. The Proxmox [EULA](https://www.gnu.org/licenses/agpl-3.0.html/) displays. Agree to it by clicking **I agree**. 
A summary of the Proxmox installer displays. +4. The Proxmox [EULA](https://www.gnu.org/licenses/agpl-3.0.html) displays. Agree to it by clicking **I agree**. A summary of the Proxmox installer displays. 5. Click **Options** at the bottom of the page. diff --git a/tutorials/restic-s3-backup/index.mdx b/tutorials/restic-s3-backup/index.mdx index afcfd70692..da305154dc 100644 --- a/tutorials/restic-s3-backup/index.mdx +++ b/tutorials/restic-s3-backup/index.mdx @@ -15,7 +15,7 @@ dates: posted: 2022-04-04 --- -Restic is a backup tool that allows you to back up your Linux, Windows, Mac, or BSD machines and send your backups to repositories via [different storage protocols](https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html/), including S3 (Object Storage). +Restic is a backup tool that allows you to back up your Linux, Windows, Mac, or BSD machines and send your backups to repositories via [different storage protocols](https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html), including S3 (Object Storage). In this tutorial, you learn how to backup a Scaleway Instance running on Ubuntu 20.04 using Restic and Object Storage. @@ -151,4 +151,4 @@ created restic repository da8e38a165 at s3:https://s3.fr-par.scw.cloud/:3000/` to access the Gitea application: -6. Click **Register** to start the database initialization. Gitea supports [SQLite](https://www.sqlite.org/index.html/) which makes the application very lightweight and ideal for a self-hosted development Environment. If you require more performance, it is also possible to use [MySQL/MariaDB](/tutorials/mariadb-ubuntu-bionic/) or [PostgreSQL](/tutorials/install-postgresql/). +6. Click **Register** to start the database initialization. Gitea supports [SQLite](https://www.sqlite.org/index.html) which makes the application very lightweight and ideal for a self-hosted development Environment. 
If you require more performance, it is also possible to use [MySQL/MariaDB](/tutorials/mariadb-ubuntu-bionic/) or [PostgreSQL](/tutorials/install-postgresql/). 7. Choose **SQLite** as database type. Leave the other pre-filled settings as they are, they are already set to the required values, and confirm the form. 8. The installation is now ready, and it is time to create the first user. Open `http://:3000/user/sign_up` in a web browser and fill in the required parameters. diff --git a/tutorials/setup-cockroachdb-cluster/index.mdx b/tutorials/setup-cockroachdb-cluster/index.mdx index 69213d3e71..cd739212cb 100644 --- a/tutorials/setup-cockroachdb-cluster/index.mdx +++ b/tutorials/setup-cockroachdb-cluster/index.mdx @@ -41,7 +41,7 @@ Below you find a schema of the architecture of CockroachDB: -- **SQL**: The SQL layer helps developers run SQL queries as in a traditional environment. It provides all the familiar terms and concepts such as schema, tables, and indexes. CockroachDB uses its own SQL feature set. Refer to the [Cockroach documentation](https://www.cockroachlabs.com/docs/stable/sql-feature-support.html/) for the complete feature set. +- **SQL**: The SQL layer helps developers run SQL queries as in a traditional environment. It provides all the familiar terms and concepts such as schema, tables, and indexes. CockroachDB uses its own SQL feature set. Refer to the [Cockroach documentation](https://www.cockroachlabs.com/docs/stable/sql-feature-support.html) for the complete feature set. - **Distributed Key-Value Store**: The SQL layer communicates with the distributed key-value store so that we can develop large tables and indexes such as HBase, BigTable, and others. - **Distributed Transactions**: Transactions are the core part of this application, their implementation of this feature manages the transition from SQL to stores and ranges. - **Node**: Nodes are the servers that store your data. They can either be virtual or physical machines. 
The distributed key-value store routes messages to the different nodes of our cluster. diff --git a/tutorials/setup-elastic-metal-proxmox-cluster-with-private-networks/index.mdx b/tutorials/setup-elastic-metal-proxmox-cluster-with-private-networks/index.mdx index 4ec7c0903b..3ad2c0428b 100644 --- a/tutorials/setup-elastic-metal-proxmox-cluster-with-private-networks/index.mdx +++ b/tutorials/setup-elastic-metal-proxmox-cluster-with-private-networks/index.mdx @@ -14,7 +14,7 @@ dates: posted: 2022-01-10 --- -Proxmox Virtual Environment (Proxmox VE) is a server virtualization platform, based on Debian Linux. It allows the deployment and management of [KVM-based](https://www.linux-kvm.org/page/Main_Page/) virtual machines or [LXC](https://linuxcontainers.org/) containers. The tool provides a REST API as well as a graphical web interface to manage the virtual machines. It is also licensed under the [GNU Affero General Public License, version 3](https://www.gnu.org/licenses/agpl-3.0.html/). +Proxmox Virtual Environment (Proxmox VE) is a server virtualization platform, based on Debian Linux. It allows the deployment and management of [KVM-based](https://www.linux-kvm.org/page/Main_Page/) virtual machines or [LXC](https://linuxcontainers.org/) containers. The tool provides a REST API as well as a graphical web interface to manage the virtual machines. It is also licensed under the [GNU Affero General Public License, version 3](https://www.gnu.org/licenses/agpl-3.0.html). 
diff --git a/tutorials/setup-jupyter-notebook/index.mdx b/tutorials/setup-jupyter-notebook/index.mdx index 437bdb52a8..a2932d1d4f 100644 --- a/tutorials/setup-jupyter-notebook/index.mdx +++ b/tutorials/setup-jupyter-notebook/index.mdx @@ -75,7 +75,7 @@ To connect to the Jupyter Notebook from a remote client, an SSH tunnel is requir ### From a Windows client -To establish an SSH tunnel on a Windows computer, you need to have [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html/) installed on the machine. +To establish an SSH tunnel on a Windows computer, you need to have [PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) installed on the machine. 1. Open the PuTTY application. 2. In the menu bar on the left, click **Connection** > **SSH** > **Tunnels**. Then add a new forwarded port: @@ -116,4 +116,4 @@ To establish an SSH tunnel on a Windows computer, you need to have [PuTTY](https 5. A running notebook is marked with a green icon. To suspend its execution, tick the corresponding box and click **Shutdown**: -For more information and advanced configuration of Jupyter Notebook, refer to the [official documentation](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/examples_index.html/). \ No newline at end of file +For more information and advanced configuration of Jupyter Notebook, refer to the [official documentation](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/examples_index.html). \ No newline at end of file diff --git a/tutorials/setup-postfix-ubuntu-bionic/index.mdx b/tutorials/setup-postfix-ubuntu-bionic/index.mdx index da12d23d2a..00c6ec766b 100644 --- a/tutorials/setup-postfix-ubuntu-bionic/index.mdx +++ b/tutorials/setup-postfix-ubuntu-bionic/index.mdx @@ -587,7 +587,7 @@ apt install redis-server servers = "127.0.0.1"; backend = "redis"; ``` -5. 
Set the [milter headers](https://rspamd.com/doc/modules/milter_headers.html/) in the file `/etc/rspamd/local.d/milter_headers.conf`: +5. Set the [milter headers](https://rspamd.com/doc/modules/milter_headers.html) in the file `/etc/rspamd/local.d/milter_headers.conf`: ``` use = ["x-spamd-bar", "x-spam-level", "authentication-results"]; ``` diff --git a/tutorials/sinatra/index.mdx b/tutorials/sinatra/index.mdx index 7aa8937a77..ee4f081a7b 100644 --- a/tutorials/sinatra/index.mdx +++ b/tutorials/sinatra/index.mdx @@ -16,7 +16,7 @@ dates: Sinatra is a lightweight domain-specific programming language and web application library that is used for writing web applications. It provides a faster and simpler alternative to Ruby frameworks such as Ruby on Rails. Sinatra is written in [Ruby](https://www.ruby-lang.org/en/) and the code for Sinatra applications will be written in Ruby too. -Sinatra routes browser requests to code that can handle those requests. Then it will render templates back to the browser as a response. For more information on Sinatra, refer to the [official documentation](http://sinatrarb.com/documentation.html/). +Sinatra routes browser requests to code that can handle those requests. Then it will render templates back to the browser as a response. For more information on Sinatra, refer to the [official documentation](http://sinatrarb.com/documentation.html). To use Sinatra, we need to install three pieces of software: Ruby, RubyGems, and Sinatra. Sinatra depends on Ruby and RubyGems. @@ -194,7 +194,7 @@ The last line redirects to an ERB file which automatically looks for a file call ## Available route methods in Sinatra -Route method names correspond to [HTTP 1.1](https://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01.html/) request method names to make things easier to remember. 
+Route method names correspond to [HTTP 1.1](https://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01.html) request method names to make things easier to remember. Routes are matched in the exact order that they are defined in your code, this means that the second route in the example below will never get matched because we have the same pattern further up in the code. diff --git a/tutorials/strapi/index.mdx b/tutorials/strapi/index.mdx index 1ff8da9a2c..414ecd41bc 100644 --- a/tutorials/strapi/index.mdx +++ b/tutorials/strapi/index.mdx @@ -224,9 +224,9 @@ Now you can access your Strapi admin interface, configure your app, create conte Now, even when you disconnect from your Instance, you will still be able to access your Strapi app's admin dashboard and API. - There are alternative ways to run your Strapi project with PM2, eg via an `ecosystem.config.js` file. You can also use PM2 to start Strapi on boot. See the [official PM2 documentation](https://pm2.keymetrics.io/docs/usage/application-declaration/), or the [Strapi Process Manager Documentation](https://strapi.io/documentation/3.0.0-beta.x/guides/process-manager.html/) for more information on using PM2. + There are alternative ways to run your Strapi project with PM2, e.g. via an `ecosystem.config.js` file. You can also use PM2 to start Strapi on boot. See the [official PM2 documentation](https://pm2.keymetrics.io/docs/usage/application-declaration/), or the [Strapi Process Manager Documentation](https://strapi.io/documentation/3.0.0-beta.x/guides/process-manager.html) for more information on using PM2. ## Conclusion -You now know how to deploy a simple Strapi app on a Scaleway Instance. Here, we've just seen the beginning of what you can do with Strapi. For more complex use cases, and more information on using the API, [visit the official Strapi documentation](https://strapi.io/documentation/v3.x/getting-started/introduction.html/). 
\ No newline at end of file +You now know how to deploy a simple Strapi app on a Scaleway Instance. Here, we've just seen the beginning of what you can do with Strapi. For more complex use cases, and more information on using the API, [visit the official Strapi documentation](https://strapi.io/documentation/v3.x/getting-started/introduction.html). \ No newline at end of file diff --git a/tutorials/terraform-quickstart/index.mdx b/tutorials/terraform-quickstart/index.mdx index d40e91d143..e8cf05f115 100644 --- a/tutorials/terraform-quickstart/index.mdx +++ b/tutorials/terraform-quickstart/index.mdx @@ -72,7 +72,7 @@ The installation of Terraform on Linux can be done in a few simple steps. ## Creating a first Instance using Terraform -To create a first Instance using Terraform, a declarative configuration file is required. This file contains all information on machine characteristics required to deploy. It has to be written in the Hashicorp Configuration Language (HCL). The deployment of Scaleway resources is done using the [Scaleway Provider for Terraform](https://www.terraform.io/docs/providers/scaleway/index.html/). For more information about HCL, refer to the [official documentation](https://www.terraform.io/docs/configuration/syntax.html/). +To create a first Instance using Terraform, a declarative configuration file is required. This file contains all information on machine characteristics required to deploy. It has to be written in the Hashicorp Configuration Language (HCL). The deployment of Scaleway resources is done using the [Scaleway Provider for Terraform](https://www.terraform.io/docs/providers/scaleway/index.html). For more information about HCL, refer to the [official documentation](https://www.terraform.io/docs/configuration/syntax.html). 1. Create a project folder (for example `scaleway-terraform`) and navigate into the newly created directory: ``` @@ -469,7 +469,7 @@ We now have a first Instance up and running. 
Next, we will modify it by restrict ## Adding resources to an infrastructure -Terraform allows us to add additional resources to infrastructures and is capable of managing both [Instances](https://www.scaleway.com/en/virtual-instances/) and [Elastic Metal servers](https://www.scaleway.com/en/elastic-metal/). Let us add an Elastic Metal server to our Terraform project using the [Elastic Metal module](https://www.terraform.io/docs/providers/scaleway/r/baremetal_server.html/) of the Scaleway provider. +Terraform allows us to add additional resources to infrastructures and is capable of managing both [Instances](https://www.scaleway.com/en/virtual-instances/) and [Elastic Metal servers](https://www.scaleway.com/en/elastic-metal/). Let us add an Elastic Metal server to our Terraform project using the [Elastic Metal module](https://www.terraform.io/docs/providers/scaleway/r/baremetal_server.html) of the Scaleway provider. - Open the file `scaleway.tf` in a text editor and add the `"scaleway_account_ssh_key"` data source and the `scaleway_baremetal_server` resource as follows: diff --git a/tutorials/upgrade-managed-postgresql-database/index.mdx b/tutorials/upgrade-managed-postgresql-database/index.mdx index 15f97d61b5..9cf102a000 100644 --- a/tutorials/upgrade-managed-postgresql-database/index.mdx +++ b/tutorials/upgrade-managed-postgresql-database/index.mdx @@ -78,9 +78,9 @@ There are three steps to completing a manual migration: creating a new PostgreSQ * ``: The ID of your **old** Database Instance, for example: `ad085d32-16e0-4ce6-862c-8e70c56b9ee7`. * ``: The name of your PostgreSQL database, for example, `customer_data`. * ``: A common name for your database backup, for example, `customer_data_backup`. - * ``: The expiration date for your backup in [ISO8601](https://www.iso.org/iso-8601-date-and-time-format.html/) format, for example: `2021-06-26T13:00:00Z`. 
+ * ``: The expiration date for your backup in [ISO8601](https://www.iso.org/iso-8601-date-and-time-format.html) format, for example: `2021-06-26T13:00:00Z`. - You will receive a [JSON formatted](https://www.json.org/json-en.html/) output with information about the backup: + You will receive a [JSON formatted](https://www.json.org/json-en.html) output with information about the backup: ```json { @@ -126,7 +126,7 @@ There are three steps to completing a manual migration: creating a new PostgreSQ * ``: The name of your PostgreSQL database, for example: `customer_data`. * ``: The ID of your **new** Database Instance, for example: `d401ff10-350d-4707-9571-c861677f0031`. - You will receive a [JSON formatted](https://www.json.org/json-en.html/) output with information about the backup. The **status** should be `restoring`: + You will receive a [JSON formatted](https://www.json.org/json-en.html) output with information about the backup. The **status** should be `restoring`: ```json { diff --git a/tutorials/use-cockpit-with-terraform/index.mdx b/tutorials/use-cockpit-with-terraform/index.mdx index dbabac2b2f..f230eb9c83 100644 --- a/tutorials/use-cockpit-with-terraform/index.mdx +++ b/tutorials/use-cockpit-with-terraform/index.mdx @@ -25,7 +25,7 @@ In this tutorial, you will learn how to get started with Cockpit using a Terrafo - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization - [Configured an SSH key](/identity-and-access-management/organizations-and-projects/how-to/create-ssh-key/#how-to-generate-an-ed25519-ssh-key-pair-on-macos-and-linux) -- [Installed Terraform](https://www.terraform.io/downloads.html/) on your local machine +- [Installed Terraform](https://www.terraform.io/downloads.html) on your local machine ## Configuring your 
environment diff --git a/tutorials/veeam-backup-replication-s3/index.mdx b/tutorials/veeam-backup-replication-s3/index.mdx index 83edc5a298..b1b5103fbd 100644 --- a/tutorials/veeam-backup-replication-s3/index.mdx +++ b/tutorials/veeam-backup-replication-s3/index.mdx @@ -14,7 +14,7 @@ dates: ## Veeam Overview -[Veeam Backup & Replication](https://www.veeam.com/vm-backup-recovery-replication-software.html/) is a proprietary backup application, developed by Veeam for virtual environments built on VMware vSphere and Microsoft Hyper-V hypervisors. +[Veeam Backup & Replication](https://www.veeam.com/vm-backup-recovery-replication-software.html) is a proprietary backup application, developed by Veeam for virtual environments built on VMware vSphere and Microsoft Hyper-V hypervisors. The solution provides backup, restore, and replication functionality for virtual machines, physical servers, and workstations as well as cloud-based workloads. @@ -28,8 +28,8 @@ The following schema represents the functionality of Veeam Backup and Restore wh - A Scaleway account logged into the [console](https://console.scaleway.com/) - [Owner](/identity-and-access-management/iam/concepts/#owner) status or [IAM permissions](/identity-and-access-management/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- A machine running [VMware vSphere](https://www.vmware.com/products/vsphere.html/) -- An intermediate Instance running [Veeam Backup & Replication](https://www.veeam.com/vm-backup-recovery-replication-software.html/) +- A machine running [VMware vSphere](https://www.vmware.com/products/vsphere.html) +- An intermediate Instance running [Veeam Backup & Replication](https://www.veeam.com/vm-backup-recovery-replication-software.html) - An [Object Storage bucket](/storage/object/how-to/create-a-bucket/) @@ -49,7 +49,7 @@ The following schema represents the functionality of Veeam Backup and Restore wh ## Configuring Veeam backup & replication - A Veeam 
**Enterprise** or **Enterprise Plus** license is required to configure the Scale out Repository feature which is required for Object Storage. Contact your Veeam Account Manager in case you need a license upgrade. [More information on Veeam Backup & Replication licensing.](https://www.veeam.com/products-edition-comparison.html/) + A Veeam **Enterprise** or **Enterprise Plus** license is required to configure the Scale out Repository feature which is required for Object Storage. Contact your Veeam Account Manager in case you need a license upgrade. [More information on Veeam Backup & Replication licensing.](https://www.veeam.com/products-edition-comparison.html) ### Configuring an Object Storage repository diff --git a/tutorials/visualize-timeseries-data-timescaledb-grafana/index.mdx b/tutorials/visualize-timeseries-data-timescaledb-grafana/index.mdx index f78d3199a9..0afd4ddc7f 100644 --- a/tutorials/visualize-timeseries-data-timescaledb-grafana/index.mdx +++ b/tutorials/visualize-timeseries-data-timescaledb-grafana/index.mdx @@ -144,7 +144,7 @@ In this tutorial, we use a [Database for PostgreSQL](https://www.scaleway.com/en DELETE 1 ``` - Run a [garbage collection](https://www.postgresql.org/docs/current/sql-vacuum.html/) using the `VACCUM` command to clean up the table and to free unused space: + Run a [garbage collection](https://www.postgresql.org/docs/current/sql-vacuum.html) using the `VACUUM` command to clean up the table and to free unused space: ``` VACUUM airquality; ``` diff --git a/tutorials/vuls-security-scanner/index.mdx b/tutorials/vuls-security-scanner/index.mdx index dd1544e9c8..8cbbbf55fd 100644 --- a/tutorials/vuls-security-scanner/index.mdx +++ b/tutorials/vuls-security-scanner/index.mdx @@ -400,4 +400,4 @@ Vuls provides a graphical web-based interface, called [VulsRepo](https://github. 8. Open a web browser and point it to `http://YOUR_SERVER_IP:5111` to visualize the Vuls reports: 9. 
Optionally, configure a [Nginx reverse proxy](/tutorials/nginx-reverse-proxy/) to restrict the access to the reports. - For more information and guidance with advanced configuration, refer to the [official Vuls documentation](https://vuls.io/docs/en/abstract.html/). \ No newline at end of file + For more information and guidance with advanced configuration, refer to the [official Vuls documentation](https://vuls.io/docs/en/abstract.html). \ No newline at end of file diff --git a/tutorials/zammad-ticketing/index.mdx b/tutorials/zammad-ticketing/index.mdx index a211c3e84b..12af197e42 100644 --- a/tutorials/zammad-ticketing/index.mdx +++ b/tutorials/zammad-ticketing/index.mdx @@ -210,4 +210,4 @@ And much more. ## Conclusion -You have configured a Virtual Cloud Instance with the Zammad ticketing solution, linked it to an Elasticserarch database, and configured a secure connection to the Instance using HTTPS. You also did a short walkthrough of some features of Zammad. For more information on all features of the solution, refer to the [official documentation](https://docs.zammad.org/en/latest/index.html/). \ No newline at end of file +You have configured a Virtual Cloud Instance with the Zammad ticketing solution, linked it to an Elasticsearch database, and configured a secure connection to the Instance using HTTPS. You also did a short walkthrough of some features of Zammad. For more information on all features of the solution, refer to the [official documentation](https://docs.zammad.org/en/latest/index.html). \ No newline at end of file