diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index d8e045265d..5622636083 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -22,12 +22,12 @@ SHELL ["pwsh", "-command"]
RUN $url = "https://github.com/JanDeDobbeleer/oh-my-posh/releases/latest";$latestVersion = (Invoke-WebRequest -Uri $url).Content | Select-String -Pattern "v[0-9]+\.[0-9]+\.[0-9]+" | Select-Object -ExpandProperty Matches | Select-Object -ExpandProperty Value;$downloadUrl = "https://github.com/JanDeDobbeleer/oh-my-posh/releases/download/$latestVersion/posh-linux-amd64.tar.gz";wget https://github.com/JanDeDobbeleer/oh-my-posh/releases/download/$latestVersion/posh-linux-amd64 -O /usr/local/bin/oh-my-posh
RUN chmod +x /usr/local/bin/oh-my-posh
-# Install Azure Developer CLI (https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/)
+# Install Azure Developer CLI (https://learn.microsoft.com/azure/developer/azure-developer-cli/)
RUN curl -fsSL https://aka.ms/install-azd.sh | bash
# Switch to non-root user:
WORKDIR /home/vscode
USER vscode
-# Install Azure PowerShell modules (https://docs.microsoft.com/en-us/powershell/azure/install-az-ps)
+# Install Azure PowerShell modules (https://learn.microsoft.com/powershell/azure/install-az-ps)
RUN Install-Module -Name Az -Force
\ No newline at end of file
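
The RUN instruction above resolves the latest oh-my-posh release tag by scraping the GitHub releases/latest redirect page, then installs the raw linux-amd64 binary. For reference, a minimal standalone PowerShell sketch of that lookup (note: the $downloadUrl computed in the RUN line points at a tarball that the final wget never uses; the sketch below fetches the raw binary directly, which is what actually gets installed):

# Resolve the newest release tag from the releases/latest redirect page.
$url = "https://github.com/JanDeDobbeleer/oh-my-posh/releases/latest"
$latestVersion = (Invoke-WebRequest -Uri $url).Content |
    Select-String -Pattern "v[0-9]+\.[0-9]+\.[0-9]+" |
    Select-Object -ExpandProperty Matches -First 1 |
    Select-Object -ExpandProperty Value

# Download the raw linux-amd64 binary for that tag and mark it executable.
Invoke-WebRequest -Uri "https://github.com/JanDeDobbeleer/oh-my-posh/releases/download/$latestVersion/posh-linux-amd64" -OutFile "/usr/local/bin/oh-my-posh"
chmod +x /usr/local/bin/oh-my-posh
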
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 589aa0f286..6df1323bc9 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -8,7 +8,7 @@ assignees: ''
---
-
+
**Is your issue related to a Jumpstart scenario, ArcBox, HCIBox, or Agora?**
@@ -31,12 +31,12 @@ assignees: ''
**Have you looked at the Troubleshooting and Logs section?**
**Screenshots**
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 92c7d0d90b..d4c6fce7ed 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -6,10 +6,10 @@ labels: ''
assignees: ''
---
-
+
**Is your feature request related to a new Jumpstart scenario you would like to contribute?**
-
+
**Is your feature request related to a problem? Please describe.**
diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template_bootstrap.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template_bootstrap.md
index 609083681b..0db7002514 100644
--- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template_bootstrap.md
+++ b/.github/PULL_REQUEST_TEMPLATE/pull_request_template_bootstrap.md
@@ -16,10 +16,7 @@
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
## Checklist
-
-
- [ ] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation and/or images accordingly.
-
-
+- [ ] I've read the Jumpstart [contribution guidelines](https://aka.ms/JumpstartContribution).
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index d6979e4cd7..b9dbcadd06 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1 +1 @@
-./docs/code_of_conduct/_index.md
\ No newline at end of file
+https://aka.ms/JumpstartCOC
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0503439bb8..a28e22c917 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1 +1 @@
-./docs/contributing/_index.md
\ No newline at end of file
+https://aka.ms/JumpstartContribution
\ No newline at end of file
diff --git a/README.md b/README.md
deleted file mode 100644
index e95822b96e..0000000000
--- a/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Azure Arc Jumpstart documentation
-
-If you are looking to explore the Jumpstart documentation, please go to the documentation website:
-
-https://azurearcjumpstart.io
-
-This repository contains the markdown files which generate the above website. See below for guidance on running with a local environment to contribute to the docs.
-
-> **Disclaimer: The intention for the Azure Arc Jumpstart project is to focus on the core Azure Arc capabilities, deployment scenarios, use-cases, and ease of use. It does not focus on Azure best-practices or the other tech and OSS projects being leveraged in the scenarios and code. Jumpstart scenarios, ArcBox, HCIBox, and Agora are all intended for evaluation, training and demo purposes only and are not supported for production use cases.**
-
-## Want to help and contribute?
-
-Before making your first contribution, make sure to review the [contributing](https://azurearcjumpstart.io/contributing/) section in the docs.
-
-* Found a bug?! Use the [Bug Report](https://github.com/microsoft/azure_arc/issues/new?assignees=&labels=bug&template=bug_report.md&title=) issue template to let us know.
-
-* To ask for a Jumpstart scenario, create one yourself or submit an Azure Arc core product feature request, use the [Feature Request](https://github.com/microsoft/azure_arc/issues/new?assignees=&labels=&template=feature_request.md&title=) issue template.
-
-This project welcomes contributions and suggestions. Most contributions require you to agree to a
-Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
-the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
-
-When you submit a pull request, a CLA bot will automatically determine whether you need to provide
-a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
-provided by the bot. You will only need to do this once across all repos using our CLA.
-
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
-contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
-
-## Jumpstart Roadmap
-
-Up-to-date roadmap for the Azure Arc Jumpstart scenarios can be found under [the repository GitHub Project](https://github.com/microsoft/azure_arc/projects/1).
-
-## Legal Notices
-
-Microsoft and any contributors grant you a license to the Microsoft documentation and other content
-in this repository under the [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/legalcode),
-see the [LICENSE](LICENSE) file, and grant you a license to any code in the repository under the [MIT License](https://opensource.org/licenses/MIT), see the
-[LICENSE-CODE](LICENSE-CODE) file.
-
-Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation
-may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries.
-The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks.
-Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.
-
-Privacy information can be found at https://privacy.microsoft.com/
-
-Microsoft and any contributors reserve all other rights, whether under their respective copyrights, patents,
-or trademarks, whether by implication, estoppel or otherwise.
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
deleted file mode 100644
index fee586f006..0000000000
--- a/RELEASE_NOTES.md
+++ /dev/null
@@ -1 +0,0 @@
-./docs/release_notes/_index.md
\ No newline at end of file
diff --git a/SECURITY.md b/SECURITY.md
index ca2bf7a61f..92b5ab00c3 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1 +1 @@
-./docs/security/_index.md
\ No newline at end of file
+https://github.com/Azure/arc_jumpstart_docs/blob/main/SECURITY.md
\ No newline at end of file
diff --git a/arc_data_services/deploy/scripts/create-sql-keytab.sh b/arc_data_services/deploy/scripts/create-sql-keytab.sh
index 5582e71d09..1e60212577 100644
--- a/arc_data_services/deploy/scripts/create-sql-keytab.sh
+++ b/arc_data_services/deploy/scripts/create-sql-keytab.sh
@@ -11,7 +11,7 @@
# $ sudo apt-get install krb5-user
#
# 2) The tool 'adutil' should be pre-installed if using --use-adutil flag.
-# Installation instructions: https://docs.microsoft.com/en-us/sql/linux/sql-server-linux-ad-auth-adutil-introduction?view=sql-server-ver15&tabs=ubuntu
+# Installation instructions: https://learn.microsoft.com/sql/linux/sql-server-linux-ad-auth-adutil-introduction?view=sql-server-ver15&tabs=ubuntu
#
# 3) User must kinit with an AD user when using --use-adutil flag.
#
diff --git a/arc_data_services/deploy/scripts/rotate-sql-keytab.sh b/arc_data_services/deploy/scripts/rotate-sql-keytab.sh
index d1668dfeb0..9987ee9909 100644
--- a/arc_data_services/deploy/scripts/rotate-sql-keytab.sh
+++ b/arc_data_services/deploy/scripts/rotate-sql-keytab.sh
@@ -11,7 +11,7 @@
# $ sudo apt-get install krb5-user
#
# 2) The tool 'adutil' should be pre-installed if using --use-adutil flag.
-# Installation instructions: https://docs.microsoft.com/en-us/sql/linux/sql-server-linux-ad-auth-adutil-introduction?view=sql-server-ver15&tabs=ubuntu
+# Installation instructions: https://learn.microsoft.com/sql/linux/sql-server-linux-ad-auth-adutil-introduction?view=sql-server-ver15&tabs=ubuntu
#
# 3) User must kinit with an AD user when using --use-adutil flag.
#
diff --git a/arc_data_services/deploy/yaml/README.md b/arc_data_services/deploy/yaml/README.md
index 436e52b64e..9a850bc170 100644
--- a/arc_data_services/deploy/yaml/README.md
+++ b/arc_data_services/deploy/yaml/README.md
@@ -1,6 +1,6 @@
# Azure Arc enabled data services - Sample yaml files
-This folder contains deployment related scripts for Azure Arc enabled data services. These scripts can be applied using kubectl command-line tool. If you are performing any operations on the services using the [Azure Data CLI](https://docs.microsoft.com/en-us/sql/azdata/install/deploy-install-azdata?toc=%2Fazure%2Fazure-arc%2Fdata%2Ftoc.json&bc=%2Fazure%2Fazure-arc%2Fdata%2Fbreadcrumb%2Ftoc.json&view=sql-server-ver15) tool then ensure that you have the latest version always.
+This folder contains deployment-related scripts for Azure Arc enabled data services. These scripts can be applied using the kubectl command-line tool. If you are performing any operations on the services using the [Azure Data CLI](https://learn.microsoft.com/sql/azdata/install/deploy-install-azdata?toc=%2Fazure%2Fazure-arc%2Fdata%2Ftoc.json&bc=%2Fazure%2Fazure-arc%2Fdata%2Fbreadcrumb%2Ftoc.json&view=sql-server-ver15) tool, make sure you always have the latest version.
## Deployment yaml files for kube-native operations
diff --git a/arc_data_services/test/launcher/overlays/openshift/scc.yaml b/arc_data_services/test/launcher/overlays/openshift/scc.yaml
index c165fe4adb..90ea435acf 100644
--- a/arc_data_services/test/launcher/overlays/openshift/scc.yaml
+++ b/arc_data_services/test/launcher/overlays/openshift/scc.yaml
@@ -1,5 +1,5 @@
# For Azure Arc Connected Cluster
-# https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#prerequisites
+# https://learn.microsoft.com/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#prerequisites
# TODO: Remove this once Connected Cluster removes SCC for fluentbit for node polling
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
diff --git a/arc_data_services/upgrade/yaml/README.md b/arc_data_services/upgrade/yaml/README.md
index 73860ecef8..0bf24ac444 100644
--- a/arc_data_services/upgrade/yaml/README.md
+++ b/arc_data_services/upgrade/yaml/README.md
@@ -1,6 +1,6 @@
# Azure Arc-enabled data services - Sample yaml files
-This folder contains upgrade related scripts for Azure Arc-enabled data services. These scripts can be applied using kubectl command-line tool. If you are performing any operations on the services using the [Azure Data CLI](https://docs.microsoft.com/sql/azdata/install/deploy-install-azdata?toc=%2Fazure%2Fazure-arc%2Fdata%2Ftoc.json&bc=%2Fazure%2Fazure-arc%2Fdata%2Fbreadcrumb%2Ftoc.json&view=sql-server-ver15) tool then ensure that you have the latest version always.
+This folder contains upgrade-related scripts for Azure Arc-enabled data services. These scripts can be applied using the kubectl command-line tool. If you are performing any operations on the services using the [Azure Data CLI](https://learn.microsoft.com/sql/azdata/install/deploy-install-azdata?toc=%2Fazure%2Fazure-arc%2Fdata%2Ftoc.json&bc=%2Fazure%2Fazure-arc%2Fdata%2Fbreadcrumb%2Ftoc.json&view=sql-server-ver15) tool, make sure you always have the latest version.
## Upgrade yaml files for kube-native operations
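
Both README hunks above describe applying these sample yaml files with the kubectl command-line tool. A minimal, hedged usage example (the file name and namespace are placeholders, not files from this folder):

# Apply one of the sample yaml files against the connected cluster.
kubectl apply -f <sample-file>.yaml --namespace arc

# Verify what was created.
kubectl get all --namespace arc
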
diff --git a/archive/arc_updateManagement/_index.md b/archive/arc_updateManagement/_index.md
deleted file mode 100644
index cbf4396d3a..0000000000
--- a/archive/arc_updateManagement/_index.md
+++ /dev/null
@@ -1,156 +0,0 @@
-
-
-
-
diff --git a/archive/arc_vm_extension_mma_arm/_index.md b/archive/arc_vm_extension_mma_arm/_index.md
deleted file mode 100644
index 15c3d46c70..0000000000
--- a/archive/arc_vm_extension_mma_arm/_index.md
+++ /dev/null
@@ -1,185 +0,0 @@
-
-
diff --git a/azure_arc_app_services_jumpstart/aks/ARM/artifacts/Bootstrap.ps1 b/azure_arc_app_services_jumpstart/aks/ARM/artifacts/Bootstrap.ps1
index ae34604ac6..fd3231b007 100644
--- a/azure_arc_app_services_jumpstart/aks/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_app_services_jumpstart/aks/ARM/artifacts/Bootstrap.ps1
@@ -71,7 +71,7 @@ Write-Host "Extending C:\ partition to the maximum size"
Resize-Partition -DriveLetter C -Size $(Get-PartitionSupportedSize -DriveLetter C).SizeMax
# Downloading global Jumpstart artifacts
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Downloading GitHub artifacts for AppServicesLogonScript.ps1
if ($deployAppService -eq $true -Or $deployFunction -eq $true -Or $deployApiMgmt -eq $true -Or $deployLogicApp -eq $true) {
diff --git a/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1 b/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1
index 18bb83b795..e4d13cfdc1 100644
--- a/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1
@@ -48,7 +48,7 @@ Write-Host "Extending C:\ partition to the maximum size"
Resize-Partition -DriveLetter C -Size $(Get-PartitionSupportedSize -DriveLetter C).SizeMax
# Downloading global Jumpstart artifacts
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Downloading GitHub artifacts for AppServicesLogonScript.ps1
Invoke-WebRequest ($templateBaseUrl + "artifacts/capiStorageClass.yaml") -OutFile "C:\Temp\capiStorageClass.yaml"
diff --git a/azure_arc_data_jumpstart/aks/ARM/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/aks/ARM/artifacts/Bootstrap.ps1
index e87138317c..420c461602 100644
--- a/azure_arc_data_jumpstart/aks/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/aks/ARM/artifacts/Bootstrap.ps1
@@ -86,7 +86,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "${t
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "${tempDir}\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "${tempDir}\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "${tempDir}\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "${tempDir}\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "${tempDir}\wallpaper.png"
Invoke-WebRequest ($templateBaseUrl + "artifacts/adConnector.yaml") -OutFile "${tempDir}\adConnector.yaml"
Invoke-WebRequest ($templateBaseUrl + "artifacts/adConnectorCMK.yaml") -OutFile "${tempDir}\adConnectorCMK.yaml"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIADAuthCMK.yaml") -OutFile "${tempDir}\SQLMIADAuthCMK.yaml"
diff --git a/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/Bootstrap.ps1
index 9997604475..22a2e827a3 100644
--- a/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/Bootstrap.ps1
@@ -76,7 +76,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "C:\
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "C:\Temp\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/Bootstrap.ps1
index f92fb284c1..dfb7c12f6b 100644
--- a/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/Bootstrap.ps1
@@ -75,7 +75,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMI.json") -OutFile "C:\Temp\
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMI.parameters.json") -OutFile "C:\Temp\SQLMI.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01 {
diff --git a/azure_arc_data_jumpstart/aro/ARM/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/aro/ARM/artifacts/Bootstrap.ps1
index 371682ba24..eeb841c469 100644
--- a/azure_arc_data_jumpstart/aro/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/aro/ARM/artifacts/Bootstrap.ps1
@@ -79,7 +79,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "C:\
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "C:\Temp\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
Invoke-WebRequest ($templateBaseUrl + "artifacts/AROSCC.yaml") -OutFile "C:\Temp\AROSCC.yaml"
diff --git a/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1
index 97ebbafe42..d32db96a65 100644
--- a/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/Bootstrap.ps1
@@ -84,7 +84,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "C:\
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "C:\Temp\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_data_jumpstart/eks/terraform/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/eks/terraform/artifacts/Bootstrap.ps1
index 4bfbbbb557..81133e6c4e 100644
--- a/azure_arc_data_jumpstart/eks/terraform/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/eks/terraform/artifacts/Bootstrap.ps1
@@ -89,7 +89,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "C:\
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "C:\Temp\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
diff --git a/azure_arc_data_jumpstart/gke/terraform/artifacts/wallpaper.png b/azure_arc_data_jumpstart/gke/terraform/artifacts/wallpaper.png
new file mode 100644
index 0000000000..e6b4ecc2b3
Binary files /dev/null and b/azure_arc_data_jumpstart/gke/terraform/artifacts/wallpaper.png differ
diff --git a/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/Bootstrap.ps1
index dd23b70782..1d7c37af3f 100644
--- a/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/Bootstrap.ps1
@@ -83,7 +83,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "C:\
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "C:\Temp\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/Bootstrap.ps1 b/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/Bootstrap.ps1
index 324cc8a3a0..43b018b8d0 100644
--- a/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/Bootstrap.ps1
+++ b/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/Bootstrap.ps1
@@ -83,7 +83,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.json") -OutFile "C:\
Invoke-WebRequest ($templateBaseUrl + "artifacts/postgreSQL.parameters.json") -OutFile "C:\Temp\postgreSQL.parameters.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/SQLMIEndpoints.ps1") -OutFile "C:\Temp\SQLMIEndpoints.ps1"
Invoke-WebRequest "https://github.com/ErikEJ/SqlQueryStress/releases/download/102/SqlQueryStress.zip" -OutFile "C:\Temp\SqlQueryStress.zip"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/artifacts/Bootstrap.ps1 b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/artifacts/Bootstrap.ps1
index 9952ce3603..5b03d7a538 100644
--- a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/artifacts/Bootstrap.ps1
+++ b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/artifacts/Bootstrap.ps1
@@ -50,7 +50,7 @@ Resize-Partition -DriveLetter C -Size $(Get-PartitionSupportedSize -DriveLetter
# Download artifacts
Invoke-WebRequest ($templateBaseUrl + "artifacts/LogonScript.ps1") -OutFile "$tempDir\LogonScript.ps1"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "$tempDir\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "$tempDir\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full_akri/bicep_template/artifacts/Bootstrap.ps1 b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full_akri/bicep_template/artifacts/Bootstrap.ps1
index 19cb613ffa..28b1dff0da 100644
--- a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full_akri/bicep_template/artifacts/Bootstrap.ps1
+++ b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full_akri/bicep_template/artifacts/Bootstrap.ps1
@@ -59,7 +59,7 @@ Resize-Partition -DriveLetter C -Size $(Get-PartitionSupportedSize -DriveLetter
# Download artifacts
Invoke-WebRequest ($templateBaseUrl + "artifacts/LogonScript.ps1") -OutFile "$tempDir\LogonScript.ps1"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "$tempDir\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "$tempDir\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single/arm_template/artifacts/Bootstrap.ps1 b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single/arm_template/artifacts/Bootstrap.ps1
index b1b5b667f3..04d4e88b71 100644
--- a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single/arm_template/artifacts/Bootstrap.ps1
+++ b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single/arm_template/artifacts/Bootstrap.ps1
@@ -33,7 +33,7 @@ $ErrorActionPreference = "SilentlyContinue"
# Downloading GitHub artifacts
Invoke-WebRequest ($templateBaseUrl + "artifacts/LogonScript.ps1") -OutFile "C:\Temp\LogonScript.ps1"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_akri/arm_template/artifacts/Bootstrap.ps1 b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_akri/arm_template/artifacts/Bootstrap.ps1
index b1b5b667f3..04d4e88b71 100644
--- a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_akri/arm_template/artifacts/Bootstrap.ps1
+++ b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_akri/arm_template/artifacts/Bootstrap.ps1
@@ -33,7 +33,7 @@ $ErrorActionPreference = "SilentlyContinue"
# Downloading GitHub artifacts
Invoke-WebRequest ($templateBaseUrl + "artifacts/LogonScript.ps1") -OutFile "C:\Temp\LogonScript.ps1"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
# Installing tools
workflow ClientTools_01
diff --git a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_vi/artifacts/Bootstrap.ps1 b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_vi/artifacts/Bootstrap.ps1
index b9c7d3cf5e..9c23d38883 100644
--- a/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_vi/artifacts/Bootstrap.ps1
+++ b/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_single_vi/artifacts/Bootstrap.ps1
@@ -70,7 +70,7 @@ if (($rdpPort -ne $null) -and ($rdpPort -ne "") -and ($rdpPort -ne "3389")) {
Invoke-WebRequest ($templateBaseUrl + "artifacts/LogonScript.ps1") -OutFile "C:\Temp\LogonScript.ps1"
Invoke-WebRequest ($templateBaseUrl + "artifacts/longhorn.yaml") -OutFile "C:\Temp\longhorn.yaml"
Invoke-WebRequest ($templateBaseUrl + "artifacts/video/video.mp4") -OutFile "C:\Temp\video.mp4"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png"
Invoke-WebRequest "https://github.com/certbot/certbot/releases/latest/download/certbot-beta-installer-win_amd64_signed.exe" -OutFile "C:\Temp\certbot-beta-installer-win_amd64_signed.exe"
##############################################################
diff --git a/azure_arc_ml_jumpstart/aks/arm_template/artifacts/Bootstrap.ps1 b/azure_arc_ml_jumpstart/aks/arm_template/artifacts/Bootstrap.ps1
index eb125104e6..691d673cd0 100644
--- a/azure_arc_ml_jumpstart/aks/arm_template/artifacts/Bootstrap.ps1
+++ b/azure_arc_ml_jumpstart/aks/arm_template/artifacts/Bootstrap.ps1
@@ -31,7 +31,7 @@ New-Item -Path $tempDir -ItemType directory -Force
Start-Transcript "C:\Temp\Bootstrap.log"
-# https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_preference_variables?view=powershell-7.1#erroractionpreference
+# https://learn.microsoft.com/powershell/module/microsoft.powershell.core/about/about_preference_variables?view=powershell-7.1#erroractionpreference
# Show errors, but to continue nonetheless
$ErrorActionPreference = 'Continue'
@@ -61,7 +61,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/inference.zip") -OutFile "C:\Te
Invoke-WebRequest ($templateBaseUrl + "artifacts/1.Get_WS.py") -OutFile "C:\Temp\1.Get_WS.py"
Invoke-WebRequest ($templateBaseUrl + "artifacts/2.Attach_Arc.py") -OutFile "C:\Temp\2.Attach_Arc.py"
Invoke-WebRequest ($templateBaseUrl + "artifacts/3.Create_MNIST_Dataset.py") -OutFile "C:\Temp\3.Create_MNIST_Dataset.py"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "C:\Temp\wallpaper.png" # Wallpaper is shared from main
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "C:\Temp\wallpaper.png" # Wallpaper is shared from main
# Unzip training and inference payloads
Expand-Archive -LiteralPath "C:\Temp\train.zip" -DestinationPath "C:\Temp"
diff --git a/azure_arc_ml_jumpstart/aks/arm_template/artifacts/train/notebook/img-classification-training.ipynb b/azure_arc_ml_jumpstart/aks/arm_template/artifacts/train/notebook/img-classification-training.ipynb
index ddb7c4f7d9..c8a6080053 100644
--- a/azure_arc_ml_jumpstart/aks/arm_template/artifacts/train/notebook/img-classification-training.ipynb
+++ b/azure_arc_ml_jumpstart/aks/arm_template/artifacts/train/notebook/img-classification-training.ipynb
@@ -185,7 +185,7 @@
"\n",
"### Download the MNIST dataset\n",
"\n",
- "Use Azure Open Datasets to get the raw MNIST data files. [Azure Open Datasets](https://docs.microsoft.com/azure/open-datasets/overview-what-are-open-datasets) are curated public datasets that you can use to add scenario-specific features to machine learning solutions for more accurate models. Each dataset has a corrseponding class, `MNIST` in this case, to retrieve the data in different ways.\n",
+    "Use Azure Open Datasets to get the raw MNIST data files. [Azure Open Datasets](https://learn.microsoft.com/azure/open-datasets/overview-what-are-open-datasets) are curated public datasets that you can use to add scenario-specific features to machine learning solutions for more accurate models. Each dataset has a corresponding class, `MNIST` in this case, to retrieve the data in different ways.\n",
"\n",
"This code retrieves the data as a `FileDataset` object, which is a subclass of `Dataset`. A `FileDataset` references single or multiple files of any format in your datastores or public urls. The class provides you with the ability to download or mount the files to your compute by creating a reference to the data source location. Additionally, you register the Dataset to your workspace for easy retrieval during training.\n",
"\n",
diff --git a/azure_arc_servers_jumpstart/esu/artifacts/Bootstrap.ps1 b/azure_arc_servers_jumpstart/esu/artifacts/Bootstrap.ps1
index 7d6bc8fc00..a0b741d0a0 100644
--- a/azure_arc_servers_jumpstart/esu/artifacts/Bootstrap.ps1
+++ b/azure_arc_servers_jumpstart/esu/artifacts/Bootstrap.ps1
@@ -98,7 +98,7 @@ foreach ($app in $appsToInstall) {
Write-Header "Fetching GitHub Artifacts"
Write-Host "Fetching Artifacts"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile $Env:ESUDir\wallpaper.png
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile $Env:ESUDir\wallpaper.png
Write-Host "Fetching Artifacts"
Invoke-WebRequest ($Env:templateBaseUrl + "artifacts/LogonScript.ps1") -OutFile $Env:ESUDir\LogonScript.ps1
diff --git a/azure_arc_sqlsrv_jumpstart/aws/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl b/azure_arc_sqlsrv_jumpstart/aws/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
index 134c967425..dbd3c7db06 100644
--- a/azure_arc_sqlsrv_jumpstart/aws/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
+++ b/azure_arc_sqlsrv_jumpstart/aws/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
@@ -10,7 +10,7 @@ $resourceTags= @{"Project"="jumpstart_azure_arc_sql"}
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
@@ -136,7 +136,7 @@ function installPowershellModule() {
if ([version]$version -lt [version]"6.2.4") {
Write-Error -Message ("Could not install Az module: Powershell $version does not support having both the AzureRM and Az modules installed. " +
"If you need to keep AzureRM available on your system, install the Az module for PowerShell 6.2.4 or later. " +
- "For more information, see: https://docs.microsoft.com/en-us/powershell/azure/migrate-from-azurerm-to-az")
+ "For more information, see: https://learn.microsoft.com/powershell/azure/migrate-from-azurerm-to-az")
return
}
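
The guard in the hunk above relies on PowerShell's [version] type accelerator, which compares versions numerically rather than lexically (so 6.10.0 correctly sorts above 6.2.4). A minimal sketch of the same check in isolation (the Install-Module arguments are illustrative assumptions, not copied from these scripts):

# Numeric version comparison via the [version] accelerator.
$version = $PSVersionTable.PSVersion.ToString()
if ([version]$version -lt [version]"6.2.4") {
    # Below 6.2.4, AzureRM and Az cannot safely coexist in the same session.
    Write-Error "PowerShell $version cannot host both AzureRM and Az; upgrade to 6.2.4 or later."
}
else {
    Install-Module -Name Az -Scope CurrentUser -Force
}
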
diff --git a/azure_arc_sqlsrv_jumpstart/azure/windows/archive/vanilla/arm_template/scripts/installArcAgentSQL.ps1 b/azure_arc_sqlsrv_jumpstart/azure/windows/archive/vanilla/arm_template/scripts/installArcAgentSQL.ps1
index 829bb121b9..b1b1723a92 100644
--- a/azure_arc_sqlsrv_jumpstart/azure/windows/archive/vanilla/arm_template/scripts/installArcAgentSQL.ps1
+++ b/azure_arc_sqlsrv_jumpstart/azure/windows/archive/vanilla/arm_template/scripts/installArcAgentSQL.ps1
@@ -22,7 +22,7 @@ $workspaceName = $logAnalyticsWorkspaceName
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
@@ -185,7 +185,7 @@ function Install-PowershellModule() {
if ([version]$version -lt [version]"6.2.4") {
Write-Warning -Category NotInstalled -Message ("Could not install Az module: Powershell $version does not support having both the AzureRM and Az modules installed. " +
"If you need to keep AzureRM available on your system, install the Az module for PowerShell 6.2.4 or later. " +
- "For more information, see: https://docs.microsoft.com/en-us/powershell/azure/migrate-from-azurerm-to-az")
+ "For more information, see: https://learn.microsoft.com/powershell/azure/migrate-from-azurerm-to-az")
return
}
diff --git a/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/Bootstrap.ps1 b/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/Bootstrap.ps1
index 7518f4b48d..474ac86098 100644
--- a/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/Bootstrap.ps1
+++ b/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/Bootstrap.ps1
@@ -101,7 +101,7 @@ Invoke-WebRequest ($templateBaseUrl + "azure/windows/defender_sql/arm_template/s
Invoke-WebRequest ($templateBaseUrl + "azure/windows/defender_sql/arm_template/scripts/installArcAgent.ps1") -OutFile "$Env:agentScript\installArcAgent.ps1"
Invoke-WebRequest ($templateBaseUrl + "azure/windows/defender_sql/arm_template/icons/arcsql.ico") -OutFile $Env:ArcJSIconDir\arcsql.ico
Invoke-WebRequest ($templateBaseUrl + "azure/windows/defender_sql/arm_template/scripts/testDefenderForSQL.ps1") -OutFile $Env:ArcJSDir\testDefenderForSQL.ps1
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "$Env:tempDir\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "$Env:tempDir\wallpaper.png"
Write-Header "Configuring Logon Scripts"
diff --git a/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLSP.ps1 b/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLSP.ps1
index 6b6ff217f1..75dc96088e 100644
--- a/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLSP.ps1
+++ b/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLSP.ps1
@@ -24,7 +24,7 @@ $unattended = $spnClientId -And $spnTenantId -And $spnClientSecret
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
diff --git a/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLUser.ps1 b/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLUser.ps1
index 0847ca6f3e..1877f275cf 100644
--- a/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLUser.ps1
+++ b/azure_arc_sqlsrv_jumpstart/azure/windows/defender_sql/arm_template/scripts/installArcAgentSQLUser.ps1
@@ -10,7 +10,7 @@ $arcMachineName = [Environment]::MachineName
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
@@ -46,7 +46,7 @@ function Install-PowershellModule() {
if ([version]$version -lt [version]"6.2.4") {
Write-Warning -Category NotInstalled -Message ("Could not install Az module: Powershell $version does not support having both the AzureRM and Az modules installed. " +
"If you need to keep AzureRM available on your system, install the Az module for PowerShell 6.2.4 or later. " +
- "For more information, see: https://docs.microsoft.com/en-us/powershell/azure/migrate-from-azurerm-to-az")
+ "For more information, see: https://learn.microsoft.com/powershell/azure/migrate-from-azurerm-to-az")
return
}
diff --git a/azure_arc_sqlsrv_jumpstart/gcp/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl b/azure_arc_sqlsrv_jumpstart/gcp/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
index ab661db180..7cc7a51838 100644
--- a/azure_arc_sqlsrv_jumpstart/gcp/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
+++ b/azure_arc_sqlsrv_jumpstart/gcp/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
@@ -10,7 +10,7 @@ $resourceTags= @{"Project"="jumpstart_azure_arc_sql"}
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
@@ -136,7 +136,7 @@ function installPowershellModule() {
if ([version]$version -lt [version]"6.2.4") {
Write-Error -Message ("Could not install Az module: Powershell $version does not support having both the AzureRM and Az modules installed. " +
"If you need to keep AzureRM available on your system, install the Az module for PowerShell 6.2.4 or later. " +
- "For more information, see: https://docs.microsoft.com/en-us/powershell/azure/migrate-from-azurerm-to-az")
+ "For more information, see: https://learn.microsoft.com/powershell/azure/migrate-from-azurerm-to-az")
return
}
diff --git a/azure_arc_sqlsrv_jumpstart/vmware/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl b/azure_arc_sqlsrv_jumpstart/vmware/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
index 134c967425..dbd3c7db06 100644
--- a/azure_arc_sqlsrv_jumpstart/vmware/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
+++ b/azure_arc_sqlsrv_jumpstart/vmware/winsrv/terraform/scripts/install_arc_agent.ps1.tmpl
@@ -10,7 +10,7 @@ $resourceTags= @{"Project"="jumpstart_azure_arc_sql"}
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
@@ -136,7 +136,7 @@ function installPowershellModule() {
if ([version]$version -lt [version]"6.2.4") {
Write-Error -Message ("Could not install Az module: Powershell $version does not support having both the AzureRM and Az modules installed. " +
"If you need to keep AzureRM available on your system, install the Az module for PowerShell 6.2.4 or later. " +
- "For more information, see: https://docs.microsoft.com/en-us/powershell/azure/migrate-from-azurerm-to-az")
+ "For more information, see: https://learn.microsoft.com/powershell/azure/migrate-from-azurerm-to-az")
return
}
diff --git a/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/PowerShell/Bootstrap.ps1 b/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/PowerShell/Bootstrap.ps1
index ca82d7787c..3caf4239b2 100644
--- a/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/PowerShell/Bootstrap.ps1
+++ b/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/PowerShell/Bootstrap.ps1
@@ -138,10 +138,7 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/Settings/mqtt_listener.yml") -O
Invoke-WebRequest ($templateBaseUrl + "artifacts/Settings/mqtt_explorer_settings.json") -OutFile "$aioToolsDir\mqtt_explorer_settings.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/Settings/Bookmarks") -OutFile "$aioToolsDir\Bookmarks"
Invoke-WebRequest ($templateBaseUrl + "artifacts/adx_dashboard/dashboard.json") -OutFile "$aioDataExplorer\dashboard.json"
-
-#Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/jumpstart_wallpaper.png" -OutFile "$aioDirectory\wallpaper.png"
-
-Invoke-WebRequest "https://raw.githubusercontent.com/azure/arc_jumpstart_docs/canary/img/wallpaper/jumpstart_title_wallpaper_dark.png" -OutFile "$aioDirectory\wallpaper.png"
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/jumpstart_wallpaper_dark.png" -OutFile "$aioDirectory\wallpaper.png"
##############################################################
# Testing connectivity to required URLs
diff --git a/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/Settings/Bookmarks b/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/Settings/Bookmarks
index 68a682ff60..fd7e6893d5 100644
--- a/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/Settings/Bookmarks
+++ b/azure_edge_iot_ops_jumpstart/aio_manufacturing/bicep/artifacts/Settings/Bookmarks
@@ -21,7 +21,7 @@
"show_icon": false,
"source": "unknown",
"type": "url",
- "url": "https://azurearcjumpstart.io/"
+ "url": "https://aka.ms/ArcJumpstart/"
}, {
"id": "24",
"name": "Azure Portal",
diff --git a/azure_jumpstart_ag/artifacts/PowerShell/Bootstrap.ps1 b/azure_jumpstart_ag/artifacts/PowerShell/Bootstrap.ps1
index 715b965d96..d8f87108e3 100644
--- a/azure_jumpstart_ag/artifacts/PowerShell/Bootstrap.ps1
+++ b/azure_jumpstart_ag/artifacts/PowerShell/Bootstrap.ps1
@@ -219,7 +219,8 @@ Invoke-WebRequest ($templateBaseUrl + "artifacts/icons/contoso.png") -OutFile $A
Invoke-WebRequest ($templateBaseUrl + "artifacts/icons/contoso.svg") -OutFile $AgIconsDir\contoso.svg
Invoke-WebRequest ($templateBaseUrl + "artifacts/settings/DockerDesktopSettings.json") -OutFile "$AgToolsDir\settings.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/settings/Bookmarks") -OutFile "$AgToolsDir\Bookmarks"
-Invoke-WebRequest "https://raw.githubusercontent.com/$githubAccount/azure_arc/$githubBranch/img/jumpstart_ag.png" -OutFile $AgDirectory\wallpaper.png
+Invoke-WebRequest "https://raw.githubusercontent.com/$githubAccount/arc_jumpstart_docs/$githubBranch/img/wallpaper/agora_wallpaper_dark.png" -OutFile $AgDirectory\wallpaper.png
+
Invoke-WebRequest ($templateBaseUrl + "artifacts/monitoring/grafana-freezer-monitoring.json") -OutFile "$AgMonitoringDir\grafana-freezer-monitoring.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/monitoring/grafana-node-exporter-full.json") -OutFile "$AgMonitoringDir\grafana-node-exporter-full.json"
Invoke-WebRequest ($templateBaseUrl + "artifacts/monitoring/grafana-cluster-global.json") -OutFile "$AgMonitoringDir\grafana-cluster-global.json"
diff --git a/azure_jumpstart_ag/artifacts/icons/contoso.png b/azure_jumpstart_ag/artifacts/icons/contoso.png
new file mode 100644
index 0000000000..0077f7ae1e
Binary files /dev/null and b/azure_jumpstart_ag/artifacts/icons/contoso.png differ
diff --git a/azure_jumpstart_ag/artifacts/settings/Bookmarks b/azure_jumpstart_ag/artifacts/settings/Bookmarks
index 69602069fc..120ebcc514 100644
--- a/azure_jumpstart_ag/artifacts/settings/Bookmarks
+++ b/azure_jumpstart_ag/artifacts/settings/Bookmarks
@@ -182,7 +182,7 @@
"show_icon": false,
"source": "unknown",
"type": "url",
- "url": "https://azurearcjumpstart.io/"
+ "url": "https://aka.ms/ArcJumpstart/"
}, {
"id": "24",
"name": "Azure Portal",
diff --git a/azure_jumpstart_arcbox/artifacts/Bootstrap.ps1 b/azure_jumpstart_arcbox/artifacts/Bootstrap.ps1
index f206bb4fe3..6d95966f98 100644
--- a/azure_jumpstart_arcbox/artifacts/Bootstrap.ps1
+++ b/azure_jumpstart_arcbox/artifacts/Bootstrap.ps1
@@ -153,7 +153,7 @@ Write-Header "Fetching GitHub Artifacts"
# All flavors
Write-Host "Fetching Artifacts for All Flavors"
-Invoke-WebRequest "https://raw.githubusercontent.com/microsoft/azure_arc/main/img/arcbox_wallpaper.png" -OutFile $Env:ArcBoxDir\wallpaper.png
+Invoke-WebRequest "https://raw.githubusercontent.com/Azure/arc_jumpstart_docs/main/img/wallpaper/arcbox_wallpaper_dark.png" -OutFile $Env:ArcBoxDir\wallpaper.png
Invoke-WebRequest ($templateBaseUrl + "artifacts/MonitorWorkbookLogonScript.ps1") -OutFile $Env:ArcBoxDir\MonitorWorkbookLogonScript.ps1
Invoke-WebRequest ($templateBaseUrl + "artifacts/mgmtMonitorWorkbook.parameters.json") -OutFile $Env:ArcBoxDir\mgmtMonitorWorkbook.parameters.json
Invoke-WebRequest ($templateBaseUrl + "artifacts/DeploymentStatus.ps1") -OutFile $Env:ArcBoxDir\DeploymentStatus.ps1
diff --git a/azure_jumpstart_arcbox/artifacts/installArcAgentSQLSP.ps1 b/azure_jumpstart_arcbox/artifacts/installArcAgentSQLSP.ps1
index c935a7405d..b904e9ea6d 100644
--- a/azure_jumpstart_arcbox/artifacts/installArcAgentSQLSP.ps1
+++ b/azure_jumpstart_arcbox/artifacts/installArcAgentSQLSP.ps1
@@ -24,7 +24,7 @@ $unattended = $spnClientId -And $spnTenantId -And $spnClientSecret
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
diff --git a/azure_jumpstart_arcbox/artifacts/installArcAgentSQLUser.ps1 b/azure_jumpstart_arcbox/artifacts/installArcAgentSQLUser.ps1
index b5edc2f73a..073879cf68 100644
--- a/azure_jumpstart_arcbox/artifacts/installArcAgentSQLUser.ps1
+++ b/azure_jumpstart_arcbox/artifacts/installArcAgentSQLUser.ps1
@@ -19,7 +19,7 @@ $unattended = $false
# These optional variables can be replaced with valid service principal details
# if you would like to use this script for a registration at scale scenario, i.e. run it on multiple machines remotely
-# For more information, see https://docs.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
+# For more information, see https://learn.microsoft.com/sql/sql-server/azure-arc/connect-at-scale
#
# For security purposes, passwords should be stored in encrypted files as secure strings
#
diff --git a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDataOps.json b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDataOps.json
index dc3502cce1..80c6568a9b 100644
--- a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDataOps.json
+++ b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDataOps.json
@@ -40,7 +40,7 @@
"kind": "shared",
"properties": {
"displayName": "[parameters('workbookDisplayName')]",
-        "serializedData": "{\"version\":\"Notebook/1.0\",\"items\":[{\"type\":1,\"content\":{\"json\":\"# Jumpstart ArcBox Workbook DataOps\\r\\n\\r\\nKeep track of your ArcBox resources by selecting one of the tabs below:\\r\\n____________________________________________________________________________________________________\\r\\n\"},\"name\":\"text - 3\"},{\"type\":11,\"content\":{\"version\":\"LinkItem/1.0\",\"style\":\"tabs\",\"links\":[{\"id\":\"001e53e2-be76-428e-9081-da7ce60368d4\",\"cellValue\":\"selectedTab\",\"linkTarget\":\"parameter\",\"linkLabel\":\"Inventory\",\"subTarget\":\"Inventory\",\"style\":\"link\"},{\"id\":\"547c6d68-f351-4898-bd7f-de56cd1ea984\",\"cellValue\":\"selectedTab\",\"linkTarget\":\"parameter\",\"linkLabel\":\"Monitoring\",\"subTarget\":\"Monitoring\",\"style\":\"link\"},{\"id\":\"942dd542-ac90-4ee4-bb5d-477c931c05b4\",\"cellValue\":\"selectedTab\",\"linkTarget\":\"parameter\",\"linkLabel\":\"Security\",\"subTarget\":\"Security\",\"style\":\"link\"}]},\"customWidth\":\"100\",\"name\":\"links - 7\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\"\"},\"name\":\"text - 4\"},{\"type\":1,\"content\":{\"json\":\"## Jumpstart ArcBox Metrics and Alerts\\r\\n\\r\\n💡 Select your Azure ArcBox subscription and Resource Group to see more information.\"},\"name\":\"text - 1\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{subscriptionId}\"],\"parameters\":[{\"id\":\"1f74ed9a-e3ed-498d-bd5b-f68f3836a117\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscriptionId\",\"label\":\"Subscriptions\",\"type\":6,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"includeAll\":true,\"showDefault\":false}},{\"id\":\"b616a3a3-4271-4208-b1a9-a92a78efed08\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resourceGroup\",\"label\":\"Resource groups\",\"type\":2,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"Resources\\r\\n| summarize by resourceGroup\\r\\n| order by resourceGroup asc\\r\\n| project id=resourceGroup, resourceGroup\",\"crossComponentResources\":[\"{subscriptionId}\"],\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"*\",\"showDefault\":false},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"0e85e0e4-a7e8-4ea8-b291-e444c317843a\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"ResourceTypes\",\"label\":\"Resource types\",\"type\":7,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"includeAll\":true,\"selectAllValue\":\"*\",\"showDefault\":false}},{\"id\":\"f60ea0a0-3703-44ca-a59b-df0246423f41\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"Resources\",\"type\":5,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"Resources\\r\\n| where \\\"*\\\" in ({ResourceTypes}) or type in~({ResourceTypes})\\r\\n| where '*' in~({resourceGroup}) or resourceGroup in~({resourceGroup}) \\r\\n| order by name asc\\r\\n| extend Rank = row_number()\\r\\n| project value = id, label = name, selected = Rank <= 10, group = resourceGroup\",\"crossComponentResources\":[\"{subscriptionId}\"],\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"*\",\"showDefault\":false},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"015d1a5e-357f-4e01-ac77-598e7b493db0\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"timeRange\",\"label\":\"Time Range\",\"type\":4,\"isRequired\":true,\"value\":{\"durationMs\":3600000},\"typeSettings\":{\"selectableValues\":[{\"durationMs\":300000},{\"durationMs\":900000},{\"durationMs\":1800000},{\"durationMs\":3600000},{\"durationMs\":14400000},{\"durationMs\":43200000},{\"durationMs\":86400000},{\"durationMs\":172800000},{\"durationMs\":259200000},{\"durationMs\":604800000},{\"durationMs\":1209600000},{\"durationMs\":2419200000},{\"durationMs\":2592000000}],\"allowCustom\":true}},{\"id\":\"bd6d6075-dc8f-43d3-829f-7e2245a3eb21\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"State\",\"type\":2,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"{\\\"version\\\":\\\"1.0.0\\\",\\\"content\\\":\\\"[ \\\\r\\\\n {\\\\\\\"id\\\\\\\":\\\\\\\"New\\\\\\\", \\\\\\\"label\\\\\\\": \\\\\\\"New\\\\\\\"},\\\\r\\\\n {\\\\\\\"id\\\\\\\":\\\\\\\"Acknowledged\\\\\\\", \\\\\\\"label\\\\\\\": \\\\\\\"Acknowledged\\\\\\\"},\\\\r\\\\n {\\\\\\\"id\\\\\\\":\\\\\\\"Closed\\\\\\\", \\\\\\\"label\\\\\\\": \\\\\\\"Closed\\\\\\\"}\\\\r\\\\n]\\\",\\\"transformers\\\":null}\",\"crossComponentResources\":[\"{Subscription}\"],\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"*\",\"showDefault\":false},\"queryType\":8}],\"style\":\"above\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},\"name\":\"parameters\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"AlertsManagementResources | where type =~ 'microsoft.alertsmanagement/alerts'\\r\\n| where todatetime(properties.essentials.startDateTime) {timeRange} \\r\\n| where \\\"*\\\" in ({resourceGroup}) or properties.essentials.targetResourceGroup in~ ({resourceGroup})\\r\\n| where \\\"*\\\" in ({ResourceTypes}) or properties.essentials.targetResourceType in~ ({ResourceTypes})\\r\\n| where \\\"*\\\" in ({Resources}) or properties.essentials.targetResource in~ ({Resources})\\r\\n| extend State=tostring(properties.essentials.alertState)\\r\\n| where \\\"*\\\" in ({State}) or State in ({State})\\r\\n| summarize Count=count(), New=countif(State==\\\"New\\\"), \\r\\nAcknowledged=countif(State==\\\"Acknowledged\\\"), \\r\\nClosed=countif(State==\\\"Closed\\\") \\r\\nby Severity=tostring(properties.essentials.severity)\\r\\n| order by Severity asc\",\"size\":3,\"title\":\"Alert Summary\",\"noDataMessage\":\"No alerts found\",\"exportMultipleValues\":true,\"exportedParameters\":[{\"fieldName\":\"Severity\",\"parameterName\":\"Severity\",\"parameterType\":1}],\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"{subscriptionId}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"Severity\",\"formatter\":11},{\"columnMatch\":\"Count\",\"formatter\":3,\"formatOptions\":{\"min\":0,\"palette\":\"blue\",\"aggregation\":\"Sum\"},\"numberFormat\":{\"unit\":17,\"options\":{\"style\":\"decimal\",\"maximumFractionDigits\":2}}},{\"columnMatch\":\"State\",\"formatter\":1}]}},\"showPin\":true,\"name\":\"query - 6\"},{\"type\":1,\"content\":{\"json\":\"## Azure Arc-enabled Kubernetes\"},\"name\":\"text - 9\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{resource}\"],\"parameters\":[{\"id\":\"e2b5cd30-7276-477f-a6bb-07da25ba5e5f\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"timeRange\",\"label\":\"Time Range\",\"type\":4,\"description\":\"Filter data by time range\",\"isRequired\":true,\"value\":{\"durationMs\":7776000000},\"typeSettings\":{\"selectableValues\":[{\"durationMs\":300000},{\"durationMs\":900000},{\"durationMs\":1800000},{\"durationMs\":3600000},{\"durationMs\":14400000},{\"durationMs\":43200000},{\"durationMs\":86400000},{\"durationMs\":172800000},{\"durationMs\":259200000},{\"durationMs\":604800000},{\"durationMs\":1209600000},{\"durationMs\":2419200000},{\"durationMs\":2592000000},{\"durationMs\":5184000000},{\"durationMs\":7776000000}],\"allowCustom\":true}},{\"id\":\"b8b76ad0-de1a-4b7c-90a8-f4eb277bb878\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscription\",\"label\":\"Subscription\",\"type\":6,\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"6b8d59ca-08c5-40fb-9962-5061b3e6e779\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workspaceName\",\"label\":\"Log Analytics Workspace\",\"type\":5,\"query\":\"resources\\r\\n| where type contains 'microsoft.operationalinsights/workspaces'\\r\\n| project id\",\"crossComponentResources\":[\"{subscription}\"],\"value\":\"\",\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"7aa94d19-4c5b-40e2-b14f-e29736a8f90c\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resource\",\"label\":\"Azure Arc-enabled K8S cluster\",\"type\":5,\"query\":\" Resources\\r\\n | where type =~ 'microsoft.kubernetes/connectedclusters'\\r\\n | project id\",\"crossComponentResources\":[\"{subscription}\"],\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"value\":\"\"},{\"id\":\"3a3fdabe-6173-4e2b-8658-38c0195fd7e2\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resourceType\",\"type\":7,\"isRequired\":true,\"query\":\"{\\\"version\\\":\\\"1.0.0\\\",\\\"content\\\":\\\"\\\\\\\"{resource:resourcetype}\\\\\\\"\\\",\\\"transformers\\\":null}\",\"typeSettings\":{\"additionalResourceOptions\":[\"value::1\"],\"showDefault\":false},\"defaultValue\":\"value::1\",\"queryType\":8},{\"id\":\"9767de49-ba31-4847-9ffc-714c02e7523c\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"clusterId\",\"type\":1,\"description\":\"Filter workspace by cluster id\",\"isHiddenWhenLocked\":true,\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"cba109cf-db6e-4261-8d3a-fe038593622d\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"clusterIdWhereClause\",\"type\":1,\"description\":\"Add to queries to filter by cluster id\",\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"criteriaContext\":{\"leftOperand\":\"resourceType\",\"operator\":\"contains\",\"rightValType\":\"static\",\"rightVal\":\"microsoft.operationalinsights/workspaces\",\"resultValType\":\"static\",\"resultVal\":\"| where ClusterId =~ '{clusterId}'\"}},{\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"ee080bd8-83dc-4fa0-b688-b2f16b956b92\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadType\",\"label\":\"Workload Type\",\"type\":2,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n| distinct ControllerKind\\r\\n| where isempty(ControllerKind) == false\\r\\n| order by ControllerKind asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::all\",\"queryType\":0,\"resourceType\":\"microsoft.kubernetes/connectedclusters\"},{\"id\":\"cf611d4b-aa93-4949-a7a1-c1d174af29ca\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadKindWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"condition\":\"if (workloadType is not empty ), result = '| where ControllerKind in ({workloadType})'\",\"criteriaContext\":{\"leftOperand\":\"workloadType\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where ControllerKind in ({workloadType})\"}},{\"condition\":\"else result = '| where \\\"a\\\" == \\\"a\\\"'\",\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"034caae5-bee3-4b66-8f80-c120a2a25c77\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"namespace\",\"label\":\"Namespace\",\"type\":2,\"description\":\"Filter the workbook by namespace\",\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n| distinct Namespace\\r\\n| where isnotempty(Namespace)\\r\\n| order by Namespace asc\",\"crossComponentResources\":[\"{Workspace}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"},{\"id\":\"faeee248-e4c3-4fae-b435-ef5fb6dabe3b\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"namespaceWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"condition\":\"if (namespace is not empty ), result = '| where Namespace in ({namespace})'\",\"criteriaContext\":{\"leftOperand\":\"namespace\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where Namespace in ({namespace})\"}},{\"condition\":\"else result = '| where \\\"a\\\" == \\\"a\\\"'\",\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"8943e259-1dde-44cd-a00b-e815eea9de34\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadName\",\"label\":\"Workload Name\",\"type\":2,\"description\":\"Filter the data for a particular
workload\",\"isRequired\":true,\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{namespaceWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n| distinct ControllerName\\r\\n| where isnotempty(ControllerName)\\r\\n| order by ControllerName asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::1\"],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::1\",\"queryType\":0,\"resourceType\":\"microsoft.kubernetes/connectedclusters\"},{\"id\":\"00a9be6c-ab0b-400b-b195-9775a47ecddd\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podStatus\",\"label\":\"Pod Status\",\"type\":2,\"description\":\"Filter by Pod status like Pending/Running/Failed etc.\",\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n| where ControllerName == '{workloadName}'\\r\\n| distinct PodStatus\\r\\n| where isnotempty(PodStatus)\\r\\n| order by PodStatus asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::all\",\"queryType\":0,\"resourceType\":\"{resourceType}\",\"value\":[\"value::all\"]},{\"id\":\"388ea6aa-12d8-485a-8e80-b4d7b8994bd8\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podStatusWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"criteriaContext\":{\"leftOperand\":\"podStatus\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where PodStatus in ({podStatus})\"}},{\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":2592000000},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"64de23e6-96b5-4105-b65d-36e40f73f4ec\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podName\",\"label\":\"Pod Name\",\"type\":2,\"description\":\"Filter by pod name \",\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n| where ControllerName == '{workloadName:value}'\\r\\n{podStatusWhereClause}\\r\\n| summarize arg_max(TimeGenerated, PodStatus) by Name\\r\\n| project Name\\r\\n| where isempty(Name) == false\\r\\n| order by Name asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::all\",\"queryType\":0,\"resourceType\":\"{resourceType}\",\"value\":[\"value::all\"]},{\"id\":\"4f7059c2-ebd7-4fc2-86c4-c51e66703582\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podNameWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"condition\":\"if (podName is not empty ), result = '| where PodName in ({podName})'\",\"criteriaContext\":{\"leftOperand\":\"podName\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where PodName in ({podName})\"}},{\"condition\":\"else result = '| where \\\"a\\\" == 
\\\"a\\\"'\",\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"e60298ff-36da-485e-acea-73c0692b8446\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadNamespaceText\",\"type\":1,\"description\":\"For displaying name space of the selected workload\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where ControllerName == '{workloadName}'\\r\\n| summarize Namespaces=make_set(Namespace)\\r\\n| extend Namespaces = strcat_array(Namespaces, ', ')\",\"crossComponentResources\":[\"{resource}\"],\"isHiddenWhenLocked\":true,\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"{resourceType}\"},{\"id\":\"9f8d0d65-d7bc-42c9-bc5c-b394288b5216\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadTypeText\",\"type\":1,\"description\":\"For displaying workload type of the selected workload\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n| where ControllerName == '{workloadName}'\\r\\n| summarize ControllerKinds=make_set(ControllerKind)\\r\\n| extend ControllerKinds = strcat_array(ControllerKinds, ', ')\",\"crossComponentResources\":[\"{resource}\"],\"isHiddenWhenLocked\":true,\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"{resourceType}\"}],\"style\":\"above\",\"queryType\":0,\"resourceType\":\"microsoft.kubernetes/connectedclusters\"},\"name\":\"pills\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"parameters\":[{\"id\":\"55cc0c6d-51df-4e58-9543-c8b21bc71e29\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podTileStatusWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"criteriaContext\":{\"leftOperand\":\"podStatusTileText\",\"operator\":\"!=\",\"rightValType\":\"static\",\"rightVal\":\"All\",\"resultValType\":\"static\",\"resultVal\":\"| where PodStatus == '{podStatusTileText}'\"}},{\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\"}],\"style\":\"pills\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"asas\"},\"name\":\"pod-status-tile-text\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let endDateTime = {timeRange:end};\\r\\nlet startDateTime = {timeRange:start};\\r\\nlet trendBinSize = {timeRange:grain};\\r\\nlet controllerName= '{workloadName}';\\r\\nKubePodInventory\\r\\n| where TimeGenerated >= startDateTime\\r\\n| where TimeGenerated < endDateTime\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where isnotempty(ClusterName)\\r\\n| where isnotempty(Namespace)\\r\\n| where ControllerName == controllerName\\r\\n| extend PodName = Name\\r\\n{podStatusWhereClause}\\r\\n{podTileStatusWhereClause}\\r\\n{podNameWhereClause}\\r\\n| summarize PodRestartCount=max(PodRestartCount) by PodName, bin(TimeGenerated, trendBinSize)\\r\\n| order by PodName asc nulls last, TimeGenerated asc\\r\\n| serialize \\r\\n| extend prevValue=iif(prev(PodName) == PodName, 
prev(PodRestartCount), PodRestartCount)\\r\\n| extend RestartCount=PodRestartCount - prevValue\\r\\n| extend RestartCount=iif(RestartCount < 0, 0, RestartCount) \\r\\n| project TimeGenerated, PodName, RestartCount\\r\\n| render timechart\",\"size\":0,\"aggregation\":5,\"showAnalytics\":true,\"title\":\"Azure Arc-enabled Kubernetes - Pod Restart Trend\",\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"]},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"pod-restart-trend-chart\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let endDateTime = {timeRange:end};\\r\\nlet startDateTime = {timeRange:start};\\r\\nlet trendBinSize = {timeRange:grain};\\r\\nlet controllerName= '{workloadName:value}';\\r\\nKubePodInventory\\r\\n| where TimeGenerated >= startDateTime\\r\\n| where TimeGenerated < endDateTime\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where isnotempty(ClusterName)\\r\\n| where isnotempty(Namespace)\\r\\n| where ControllerName == controllerName\\r\\n| extend PodName = Name\\r\\n{podStatusWhereClause}\\r\\n{podTileStatusWhereClause}\\r\\n{podNameWhereClause}\\r\\n| extend ContainerName=tostring(split(ContainerName, '/')[1])\\r\\n| where isempty(ContainerName) == false\\r\\n| summarize ContainerRestartCount=sum(ContainerRestartCount) by ContainerName, bin(TimeGenerated, 1tick)\\r\\n| order by ContainerName asc nulls last, TimeGenerated asc\\r\\n| serialize \\r\\n| extend prevValue=iif(prev(ContainerName) == ContainerName, prev(ContainerRestartCount), ContainerRestartCount)\\r\\n| extend RestartCount=ContainerRestartCount - prevValue\\r\\n| extend RestartCount=iif(RestartCount < 0, 0, RestartCount) \\r\\n| project TimeGenerated, ContainerName, RestartCount\\r\\n| summarize RestartCount=sum(RestartCount) by ContainerName, bin(TimeGenerated, trendBinSize)\",\"size\":0,\"aggregation\":5,\"showAnalytics\":true,\"title\":\"Azure Arc-enabled Kubernetes - Container restart trend\",\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"timechart\"},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"container-restart-trend-chart\",\"styleSettings\":{\"showBorder\":true}},{\"type\":10,\"content\":{\"chartId\":\"workbook3e0e301c-50cf-4e53-ac2a-40f5eed823a0\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":3600000},\"metrics\":[{\"namespace\":\"insights.container/pods\",\"metric\":\"insights.container/pods--PodCount\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled Kubernetes - Pod Count\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 
19\"},{\"type\":10,\"content\":{\"chartId\":\"workbook167c4490-9cde-4fcd-be0f-401070f13ccd\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":3600000},\"metrics\":[{\"namespace\":\"insights.container/pods\",\"metric\":\"insights.container/pods--PodReadyPercentage\",\"aggregation\":4,\"splitBy\":null},{\"namespace\":\"insights.container/pods\",\"metric\":\"insights.container/pods--restartingContainerCount\",\"aggregation\":4}],\"title\":\"Azure Arc-enabled Kubernetes - Pod status\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 20\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let endDateTime = {timeRange:end};\\r\\nlet startDateTime = {timeRange:start};\\r\\nlet trendBinSize = {timeRange:grain};\\r\\nlet controllerName= '{workloadName}';\\r\\nKubePodInventory\\r\\n| where TimeGenerated >= startDateTime\\r\\n| where TimeGenerated < endDateTime\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where isnotempty(ClusterName)\\r\\n| where isnotempty(Namespace)\\r\\n| extend PodName = Name\\r\\n{podStatusWhereClause}\\r\\n{podNameWhereClause}\\r\\n| where ControllerName == controllerName\\r\\n| extend InstanceName = strcat(ClusterId, '/', ContainerName),\\r\\n ContainerName = strcat(Name, '/', tostring(split(ContainerName, '/')[1]))\\r\\n| summarize arg_max(TimeGenerated, *) by ContainerName, Name\\r\\n{podTileStatusWhereClause}\\r\\n| extend ContainerLastStatus = todynamic(ContainerLastStatus) \\r\\n| project TimeGenerated, ContainerName, PodStatus, ContainerStatus, LastState=ContainerLastStatus.lastState, LastStateReason=ContainerLastStatus.reason, LastStateStartTime=ContainerLastStatus.startedAt,\\r\\nLastStateFinishTime=ContainerLastStatus.finishedAt\\r\\n\",\"size\":0,\"aggregation\":5,\"showAnalytics\":true,\"title\":\"Azure Arc-enabled Kubernetes - Container Status for 
Pods\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"TimeGenerated\",\"formatter\":6,\"formatOptions\":{},\"dateFormat\":{\"showUtcTime\":null,\"formatName\":\"shortDateTimePattern\"}},{\"columnMatch\":\"PodStatus\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"Running\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"Pending\",\"representation\":\"pending\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"Failed\",\"representation\":\"failed\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Blank\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"ContainerStatus\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"Running\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"waiting\",\"representation\":\"pending\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"success\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"LastState\",\"formatter\":0,\"formatOptions\":{},\"numberFormat\":{\"unit\":0,\"options\":{\"style\":\"decimal\"},\"emptyValCustomText\":\"-\"}},{\"columnMatch\":\"LastStateReason\",\"formatter\":0,\"formatOptions\":{},\"numberFormat\":{\"unit\":0,\"options\":{\"style\":\"decimal\"},\"emptyValCustomText\":\"-\"}},{\"columnMatch\":\"LastStateStartTime\",\"formatter\":6,\"formatOptions\":{},\"dateFormat\":{\"showUtcTime\":null,\"formatName\":\"shortDateTimePattern\"}},{\"columnMatch\":\"LastStateFinishTime\",\"formatter\":6,\"formatOptions\":{},\"dateFormat\":{\"showUtcTime\":null,\"formatName\":\"shortDateTimePattern\"}}]},\"sortBy\":[]},\"showPin\":true,\"name\":\"container-status-for-pods-chart\"},{\"type\":10,\"content\":{\"chartId\":\"workbook87327d65-b260-4473-9f2b-5d90b1100543\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":2592000000},\"metrics\":[{\"namespace\":\"insights.container/nodes\",\"metric\":\"insights.container/nodes--cpuUsagePercentage\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled Kubernetes cluster - Node CPU usage %\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 17\"},{\"type\":10,\"content\":{\"chartId\":\"workbook2f202f95-1281-4077-a49b-31c3e3d3271b\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":3600000},\"metrics\":[{\"namespace\":\"insights.container/nodes\",\"metric\":\"insights.container/nodes--memoryWorkingSetPercentage\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled Kubernetes cluster - Node memory working set %\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 18\"},{\"type\":1,\"content\":{\"json\":\"## Azure Arc-enabled SQL Managed Instance\"},\"name\":\"text - 
18\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{subscription}\"],\"parameters\":[{\"id\":\"be802690-79de-4708-8629-4c57b0d78085\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscriptionId\",\"label\":\"Subscription\",\"type\":6,\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"3bd2e749-7c3f-47fd-9f8a-7ab118be8850\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workspaceName\",\"label\":\"Log Analytics Workspace\",\"type\":5,\"query\":\"resources\\r\\n| where type contains 'microsoft.operationalinsights/workspaces'\\r\\n| project id\",\"crossComponentResources\":[\"{subscription}\"],\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"a308510f-a9f5-4ee4-a4b1-a175aa96b290\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"SQLMI\",\"label\":\"Azure Arc-enabled SQL MI\",\"type\":5,\"query\":\" Resources\\r\\n | where type =~ 'Microsoft.AzureArcData/sqlManagedInstances'\\r\\n | project id\",\"crossComponentResources\":[\"{subscription}\"],\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"}],\"style\":\"above\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},\"name\":\"pills - Copy\"},{\"type\":10,\"content\":{\"chartId\":\"789dd9d3-afbc-4440-8e31-7fe124f7b9ce\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.azurearcdata/sqlmanagedinstances\",\"metricScope\":0,\"resourceParameter\":\"SQLMI\",\"resourceIds\":[\"{SQLMI}\"],\"timeContext\":{\"durationMs\":86400000},\"metrics\":[{\"namespace\":\"sql server\",\"metric\":\"sql server--CPU Usage: pod-0\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled SQL MI - CPU usage\",\"gridSettings\":{\"rowLimit\":10000}},\"name\":\"metric - 0\"},{\"type\":10,\"content\":{\"chartId\":\"540fb39f-7903-4cc8-af49-679ee1f331fe\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.azurearcdata/sqlmanagedinstances\",\"metricScope\":0,\"resourceParameter\":\"SQLMI\",\"resourceIds\":[\"{SQLMI}\"],\"timeContext\":{\"durationMs\":86400000},\"metrics\":[{\"namespace\":\"sql server\",\"metric\":\"sql server--Memory Usage: pod-0\",\"aggregation\":4,\"splitBy\":null}],\"gridSettings\":{\"rowLimit\":10000}},\"name\":\"metric - 0\"},{\"type\":10,\"content\":{\"chartId\":\"31b401d2-6d90-4a57-a61a-d6e458523448\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.azurearcdata/sqlmanagedinstances\",\"metricScope\":0,\"resourceParameter\":\"SQLMI\",\"resourceIds\":[\"{SQLMI}\"],\"timeContext\":{\"durationMs\":86400000},\"metrics\":[{\"namespace\":\"sql server\",\"metric\":\"sql server--Transactions/second: pod-0\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled SQL MI - Transactions per Second\",\"gridSettings\":{\"rowLimit\":10000}},\"name\":\"metric - 
0\"}]},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"Monitoring\"},\"name\":\"Monitoring\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\"## Jumpstart ArcBox resource inventory\\r\\n\\r\\n💡 Select your Azure ArcBox subscription and Resource Group to see more information.\"},\"name\":\"text - 4\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{subscriptionId}\"],\"parameters\":[{\"id\":\"984514df-fff0-434c-a373-7090566e8c44\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscriptionId\",\"type\":6,\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":86400000},\"label\":\"Subscription\"},{\"id\":\"cb849a6b-937d-4e93-8d09-770554777009\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resourceGroup\",\"label\":\"Resource Group\",\"type\":2,\"query\":\"Resources\\r\\n| summarize by resourceGroup\\r\\n| order by resourceGroup asc\\r\\n| project id=resourceGroup, resourceGroup\",\"crossComponentResources\":[\"{subscriptionId}\"],\"value\":\"arcboxdataops\",\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":86400000},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"0fd9f40f-ffe0-4894-adc7-64866aa4b1e4\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"ResourceType\",\"label\":\"Resources\",\"type\":7,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":86400000},\"value\":[\"value::all\"]}],\"style\":\"pills\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},\"name\":\"parameters - 1\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Resources \\r\\n| where resourceGroup == \\\"{resourceGroup}\\\"\\r\\n| extend type = case(\\r\\ntype contains 'microsoft.netapp/netappaccounts', 'NetApp Accounts',\\r\\ntype contains \\\"microsoft.compute\\\", \\\"Azure Compute\\\",\\r\\ntype contains \\\"microsoft.logic\\\", \\\"LogicApps\\\",\\r\\ntype contains 'microsoft.keyvault/vaults', \\\"Key Vaults\\\",\\r\\ntype contains 'microsoft.storage/storageaccounts', \\\"Storage Accounts\\\",\\r\\ntype contains 'microsoft.compute/availabilitysets', 'Availability Sets',\\r\\ntype contains 'microsoft.operationalinsights/workspaces', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.operationsmanagement', 'Operations Management Resources',\\r\\ntype contains 'microsoft.insights', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.desktopvirtualization/applicationgroups', 'WVD Application Groups',\\r\\ntype contains 'microsoft.desktopvirtualization/workspaces', 'WVD Workspaces',\\r\\ntype contains 'microsoft.desktopvirtualization/hostpools', 'WVD Hostpools',\\r\\ntype contains 'microsoft.recoveryservices/vaults', 'Backup Vaults',\\r\\ntype contains 'microsoft.web', 'App Services',\\r\\ntype contains 'microsoft.managedidentity/userassignedidentities','Managed Identities',\\r\\ntype contains 'microsoft.storagesync/storagesyncservices', 'Azure File Sync',\\r\\ntype contains 'microsoft.hybridcompute/machines', 'Azure Arc-enabled servers ',\\r\\ntype contains 'Microsoft.EventHub', 'Event Hub',\\r\\ntype contains 'Microsoft.EventGrid', 'Event Grid',\\r\\ntype contains 
'Microsoft.Sql', 'SQL Resources',\\r\\ntype contains 'Microsoft.HDInsight/clusters', 'HDInsight Clusters',\\r\\ntype contains 'microsoft.devtestlab', 'DevTest Labs Resources',\\r\\ntype contains 'microsoft.containerinstance', 'Container Instances Resources',\\r\\ntype contains 'microsoft.portal/dashboards', 'Azure Dashboards',\\r\\ntype contains 'microsoft.containerregistry/registries', 'Container Registry',\\r\\ntype contains 'microsoft.automation', 'Automation Resources',\\r\\ntype contains 'sendgrid.email/accounts', 'SendGrid Accounts',\\r\\ntype contains 'microsoft.datafactory/factories', 'Data Factory',\\r\\ntype contains 'microsoft.databricks/workspaces', 'Databricks Workspaces',\\r\\ntype contains 'microsoft.machinelearningservices/workspaces', 'Machine Learning Workspaces',\\r\\ntype contains 'microsoft.alertsmanagement/smartdetectoralertrules', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.apimanagement/service', 'API Management Services',\\r\\ntype contains 'microsoft.dbforpostgresql', 'PostgreSQL Resources',\\r\\ntype contains 'microsoft.scheduler/jobcollections', 'Scheduler Job Collections',\\r\\ntype contains 'microsoft.visualstudio/account', 'Azure DevOps Organization',\\r\\ntype contains 'microsoft.network/', 'Network Resources',\\r\\ntype contains 'microsoft.migrate/' or type contains 'microsoft.offazure', 'Azure Migrate Resources',\\r\\ntype contains 'microsoft.servicebus/namespaces', 'Service Bus Namespaces',\\r\\ntype contains 'microsoft.classic', 'ASM Obsolete Resources',\\r\\ntype contains 'microsoft.resources/templatespecs', 'Template Spec Resources',\\r\\ntype contains 'microsoft.virtualmachineimages', 'VM Image Templates',\\r\\ntype contains 'microsoft.documentdb', 'Cosmos DB Resources',\\r\\ntype contains 'microsoft.alertsmanagement/actionrules', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.kubernetes/connectedclusters', 'Azure Arc-enabled Kubernetes',\\r\\ntype contains 'microsoft.purview', 'Purview Resources',\\r\\ntype contains 'microsoft.security', 'Security Resources',\\r\\ntype contains 'microsoft.cdn', 'CDN Resources',\\r\\ntype contains 'microsoft.devices','IoT Resources',\\r\\ntype contains 'microsoft.datamigration', 'Data Migration Services',\\r\\ntype contains 'microsoft.cognitiveservices', 'Cognitive Services',\\r\\ntype contains 'microsoft.customproviders', 'Custom Providers',\\r\\ntype contains 'microsoft.appconfiguration', 'App Services',\\r\\ntype contains 'microsoft.search', 'Search Services',\\r\\ntype contains 'microsoft.maps', 'Maps',\\r\\ntype contains 'microsoft.containerservice/managedclusters', 'AKS',\\r\\ntype contains 'microsoft.signalrservice', 'SignalR',\\r\\ntype contains 'microsoft.resourcegraph/queries', 'Resource Graph Queries',\\r\\ntype contains 'microsoft.batch', 'MS Batch',\\r\\ntype contains 'microsoft.analysisservices', 'Analysis Services',\\r\\ntype contains 'microsoft.synapse/workspaces', 'Synapse Workspaces',\\r\\ntype contains 'microsoft.synapse/workspaces/sqlpools', 'Synapse SQL Pools',\\r\\ntype contains 'microsoft.kusto/clusters', 'ADX Clusters',\\r\\ntype contains 'microsoft.resources/deploymentscripts', 'Deployment Scripts',\\r\\ntype contains 'microsoft.aad/domainservices', 'AD Domain Services',\\r\\ntype contains 'microsoft.labservices/labaccounts', 'Lab Accounts',\\r\\ntype contains 'microsoft.automanage/accounts', 'Automanage Accounts',\\r\\ntype contains 'microsoft.extendedlocation/customlocations', 'Azure Arc Custom Locations',\\r\\ntype contains 
'microsoft.azurearcdata/postgresinstances', 'Azure Arc-enabled PostgreSQL',\\r\\ntype contains 'microsoft.azurearcdata/sqlmanagedinstances', 'Azure Arc-enabled SQL Managed Instance',\\r\\ntype contains 'microsoft.azurearcdata/datacontrollers', 'Azure Arc-enabled data controller',\\r\\ntype contains 'microsoft.azurearcdata/sqlserverinstances', 'Azure Arc-enabled SQL Server',\\r\\nstrcat(\\\"Not Translated: \\\", type))\\r\\n| summarize count() by type\",\"size\":1,\"title\":\"Resource Count by Type\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"{subscriptionId}\"],\"visualization\":\"tiles\",\"tileSettings\":{\"titleContent\":{\"columnMatch\":\"type\",\"formatter\":1},\"leftContent\":{\"columnMatch\":\"count_\",\"formatter\":12,\"formatOptions\":{\"palette\":\"auto\"},\"numberFormat\":{\"unit\":17,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumFractionDigits\":2,\"maximumSignificantDigits\":3}}},\"showBorder\":true,\"sortCriteriaField\":\"count_\",\"sortOrderField\":2}},\"name\":\"query - Overview Resource Counts by type\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Resources\\r\\n| where type in~ ({ResourceType})\\r\\n| where resourceGroup == \\\"{resourceGroup}\\\"\\r\\n| project Resource = id, Subscription = subscriptionId, ['Resource group'] = strcat('/subscriptions/', subscriptionId, '/resourceGroups/', resourceGroup), Location = location, tags\",\"size\":2,\"title\":\"Resources List\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"{subscriptionId}\"]},\"name\":\"query - 2\"}]},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"Inventory\"},\"name\":\"Inventory\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\"## Jumpstart ArcBox security overview\"},\"name\":\"text - 5\"},{\"type\":1,\"content\":{\"json\":\"💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\\r\\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\\r\\n2. Select the specific Azure subscription for which you want to configure the data export.\\r\\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\\r\\n4. Set the export target to **Log Analytics workspace**.\\r\\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\\r\\n6. From the export frequency options, select **Streaming** and **Snapshots**.\\r\\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. 
Select Save.\\r\\n\\r\\n[Learn more](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\\r\\n\\r\\n> **Notes**\\r\\n* To get full visibility, wait at least one week for the first snapshot to be exported.\\r\\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export).\"},\"showPin\":false,\"name\":\"Instructions\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{workspaceName}\"],\"parameters\":[{\"id\":\"ae721cb1-e030-4e02-8839-9c6a00f66c8a\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workspaceName\",\"type\":5,\"description\":\"Select at least one workspace that contains continuous export data based on the selected subscriptions\",\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"resources\\r\\n| where type =~ 'microsoft.operationalinsights/workspaces'\\r\\n| project id\",\"crossComponentResources\":[\"value::selected\"],\"value\":[\"value::all\"],\"typeSettings\":{\"resourceTypeFilter\":{\"microsoft.operationalinsights/workspaces\":true},\"additionalResourceOptions\":[\"value::all\"],\"showDefault\":false},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"label\":\"Log Analytics Workspace\"},{\"id\":\"4f3a03fd-9968-4ee7-b6bc-d04d3bbe14a8\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"TimeRange\",\"label\":\"Time Range\",\"type\":4,\"description\":\"Filter the data of this report to one of these predefined time ranges\",\"isRequired\":true,\"value\":{\"durationMs\":2592000000},\"typeSettings\":{\"selectableValues\":[{\"durationMs\":259200000},{\"durationMs\":604800000},{\"durationMs\":1209600000},{\"durationMs\":2419200000},{\"durationMs\":2592000000}],\"allowCustom\":true}},{\"id\":\"0117bdc3-a4e2-476b-b7cc-3d1f486e67cf\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"ErrorHandle\",\"type\":1,\"query\":\"let MissingTable = view () { print isMissing=1 };\\r\\nunion isfuzzy=true MissingTable, (SecureScores | getschema | summarize c=count() | project isMissing=iff(c > 0, 0, 1))\\r\\n| top 1 by isMissing asc\",\"crossComponentResources\":[\"{workspaceName}\"],\"isHiddenWhenLocked\":true,\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"}],\"style\":\"above\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"},\"name\":\"Parameters\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\" Current score trends per subscription (not affected by the time range parameter)\\r\\n\"},\"customWidth\":\"50\",\"name\":\"text - 3\"},{\"type\":1,\"content\":{\"json\":\" Aggregated score for selected subscriptions over time\\r\\n\"},\"customWidth\":\"50\",\"name\":\"text - 4\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Current score trends per subscription (show current, 7-day change from the current score as %, 30-day change from the current score as %)\\r\\nlet startOfToday = startofday(now()); \\r\\nlet offsetWeek = 6d; \\r\\nlet offsetMonth = 29d; \\r\\nlet lookbackDurationWeek = 14d; \\r\\nlet lookbackDurationMonth = 45d; \\r\\nlet endTimeWeek = startOfToday - offsetWeek; \\r\\nlet startTimeWeek = endTimeWeek - lookbackDurationWeek; \\r\\nlet endTimeMonth = startOfToday - 
offsetMonth; \\r\\nlet startTimeMonth = endTimeMonth - lookbackDurationMonth; \\r\\nSecureScores \\r\\n| extend Day = startofday(TimeGenerated) \\r\\n| summarize arg_max(TimeGenerated, *) by Day, SecureScoresSubscriptionId \\r\\n| summarize arg_max(Day, *) by SecureScoresSubscriptionId \\r\\n| join kind = fullouter( \\r\\n SecureScores \\r\\n | extend Day = startofday(TimeGenerated) \\r\\n | where TimeGenerated > startTimeWeek and TimeGenerated <= endTimeWeek \\r\\n | summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId \\r\\n | project OldScoreSevenDays = PercentageScore, SecureScoresSubscriptionId \\r\\n ) \\r\\n on SecureScoresSubscriptionId \\r\\n| join kind = fullouter( \\r\\n SecureScores \\r\\n | extend Day = startofday(TimeGenerated) \\r\\n | where TimeGenerated > startTimeMonth and TimeGenerated <= endTimeMonth \\r\\n | summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId \\r\\n | project OldMonthScore = PercentageScore, SecureScoresSubscriptionId \\r\\n ) \\r\\n on SecureScoresSubscriptionId \\r\\n| extend DiffSevenDays = tostring(((PercentageScore - OldScoreSevenDays) / OldScoreSevenDays) * 100) \\r\\n| extend DiffSevenDays = iff(isempty(DiffSevenDays), \\\"\\\", DiffSevenDays) \\r\\n| extend DiffMonth = tostring(((PercentageScore - OldMonthScore) / OldMonthScore) * 100) \\r\\n| extend DiffMonth = iff(isempty(DiffMonth), \\\"\\\", DiffMonth) \\r\\n| project SecureScoresSubscriptionId, CurrentScore = PercentageScore * 100, todouble(DiffSevenDays), todouble(DiffMonth)\",\"size\":0,\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"exportFieldName\":\"SecureScoresSubscriptionId\",\"exportParameterName\":\"selectedSubscription\",\"exportDefaultValue\":\"All\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"SecureScoresSubscriptionId\",\"formatter\":15,\"formatOptions\":{\"linkTarget\":null,\"showIcon\":true,\"customColumnWidthSetting\":\"25ch\"}},{\"columnMatch\":\"CurrentScore\",\"formatter\":4,\"formatOptions\":{\"min\":0,\"max\":100,\"palette\":\"redGreen\",\"customColumnWidthSetting\":\"20ch\"},\"numberFormat\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumSignificantDigits\":2}}},{\"columnMatch\":\"DiffSevenDays\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"<\",\"thresholdValue\":\"0\",\"representation\":\"trenddown\",\"text\":\"{0}{1}\"},{\"operator\":\">\",\"thresholdValue\":\"0\",\"representation\":\"trendup\",\"text\":\"{0}{1}\"},{\"operator\":\"is Empty\",\"thresholdValue\":\"0\",\"representation\":\"Normal\",\"text\":\"N/A\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Blank\",\"text\":\"{0}{1}\"}],\"customColumnWidthSetting\":\"20ch\"},\"numberFormat\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumSignificantDigits\":2}}},{\"columnMatch\":\"DiffMonth\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"<\",\"thresholdValue\":\"0\",\"representation\":\"trenddown\",\"text\":\"{0}{1}\"},{\"operator\":\">\",\"thresholdValue\":\"0\",\"representation\":\"trendup\",\"text\":\"{0}{1}\"},{\"operator\":\"is 
Empty\",\"thresholdValue\":\"0\",\"representation\":\"Normal\",\"text\":\"N/A\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Blank\",\"text\":\"{0}{1}\"}],\"customColumnWidthSetting\":\"20ch\"},\"numberFormat\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumSignificantDigits\":2}}}],\"rowLimit\":500,\"sortBy\":[{\"itemKey\":\"$gen_link_SecureScoresSubscriptionId_0\",\"sortOrder\":1}],\"labelSettings\":[{\"columnId\":\"SecureScoresSubscriptionId\",\"label\":\"Subscription name\"},{\"columnId\":\"CurrentScore\",\"label\":\"Current score %\"},{\"columnId\":\"DiffSevenDays\",\"label\":\"7-day change\"},{\"columnId\":\"DiffMonth\",\"label\":\"30-day change\"}]},\"sortBy\":[{\"itemKey\":\"$gen_link_SecureScoresSubscriptionId_0\",\"sortOrder\":1}]},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"ScoreTrends\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Aggregated score for all subscriptions over time\\r\\nSecureScores\\r\\n| where '{selectedSubscription}' == 'All' or SecureScoresSubscriptionId == '{selectedSubscription}'\\r\\n| where MaxScore>0\\r\\n| extend subscriptionScore = CurrentScore/MaxScore \\r\\n| extend subScoreXsubWeight = subscriptionScore*Weight \\r\\n| extend Day = startofday(TimeGenerated) \\r\\n| summarize upperValue = sum(subScoreXsubWeight), underValue = sum(todouble(Weight)) by Day\\r\\n| extend OverallScore = 100*((upperValue)/(underValue))\\r\\n| project OverallScore, Day\",\"size\":0,\"aggregation\":5,\"showAnnotations\":true,\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"timeBrushParameterName\":\"TimeRange\",\"timeBrushExportOnlyWhenBrushed\":true,\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"timechart\",\"chartSettings\":{\"seriesLabelSettings\":[{\"seriesName\":\"overallScore\",\"label\":\"Overall Score\",\"color\":\"lightBlue\"}],\"ySettings\":{\"min\":0,\"max\":100}}},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"ScoreOvertime\"}],\"exportParameters\":true},\"conditionalVisibilities\":[{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"},{\"parameterName\":\"ErrorHandle\",\"comparison\":\"isNotEqualTo\",\"value\":\"1\"}],\"name\":\"SecureScore\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\" Top recommendations with recent increase in unhealthy resources\\r\\n \\r\\n Recommendations with the most resources that have become unhealthy in the periods shown\"},\"customWidth\":\"50\",\"name\":\"UnhealthyRecommendations\"},{\"type\":1,\"content\":{\"json\":\" Security controls scores over time (weekly)\\r\\n\\r\\n\\r\\n\"},\"customWidth\":\"50\",\"name\":\"text - 3\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Top recommendations with recent increase in unhealthy resources\\r\\nSecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState =~ \\\"Unhealthy\\\"\\r\\n| summarize UnhealthyAssessedResources = dcount(AssessedResourceId),RecommendationName = any(RecommendationName) by RecommendationId\\r\\n| project 
RecommendationName, UnhealthyAssessedResources\\r\\n| sort by UnhealthyAssessedResources desc\\r\\n| take 10\",\"size\":0,\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"table\",\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"RecommendationName\",\"formatter\":0,\"formatOptions\":{\"customColumnWidthSetting\":\"70ch\"},\"numberFormat\":{\"unit\":0,\"options\":{\"style\":\"decimal\",\"useGrouping\":false}},\"tooltipFormat\":{\"tooltip\":\"View recommendation '{0}'\"}},{\"columnMatch\":\"UnhealthyAssessedResources\",\"formatter\":4,\"formatOptions\":{\"min\":0,\"palette\":\"blue\",\"compositeBarSettings\":{\"labelText\":\"\",\"columnSettings\":[]},\"customColumnWidthSetting\":\"25ch\"}},{\"columnMatch\":\"RecommendationId\",\"formatter\":5}],\"labelSettings\":[{\"columnId\":\"RecommendationName\",\"label\":\"Recommendation name\"},{\"columnId\":\"UnhealthyAssessedResources\",\"label\":\"Unhealthy count\"}]},\"tileSettings\":{\"showBorder\":false,\"titleContent\":{\"columnMatch\":\"RecommendationName\",\"formatter\":1},\"leftContent\":{\"columnMatch\":\"UnhealthyCount\",\"formatter\":12,\"formatOptions\":{\"palette\":\"auto\"},\"numberFormat\":{\"unit\":17,\"options\":{\"maximumSignificantDigits\":3,\"maximumFractionDigits\":2}}}},\"graphSettings\":{\"type\":0,\"topContent\":{\"columnMatch\":\"RecommendationName\",\"formatter\":1},\"centerContent\":{\"columnMatch\":\"UnhealthyCount\",\"formatter\":1,\"numberFormat\":{\"unit\":17,\"options\":{\"maximumSignificantDigits\":3,\"maximumFractionDigits\":2}}}}},\"customWidth\":\"50\",\"name\":\"query - 7\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Security controls score over time (weekly) \\r\\nlet subscriptionsWeight = \\r\\n SecureScores\\r\\n | where '{selectedSubscription}' == 'All' or SecureScoresSubscriptionId == '{selectedSubscription}'\\r\\n | summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId \\r\\n | project SecureScoresSubscriptionId, SubscriptionWeight = Weight; \\r\\nSecureScoreControls \\r\\n| where MaxScore > 0\\r\\n| where IsSnapshot == true\\r\\n| extend Week = startofweek(TimeGenerated) \\r\\n| summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId, ControlId, Week \\r\\n| join kind=inner(\\r\\n subscriptionsWeight\\r\\n ) on SecureScoresSubscriptionId \\r\\n| extend WeightedControlScore = PercentageScore * SubscriptionWeight \\r\\n| summarize WeightedScoreAvg = sum(WeightedControlScore)/sum(SubscriptionWeight)*100, ControlName = any(ControlName) by ControlId, Week\\r\\n| order by WeightedScoreAvg desc\",\"size\":0,\"aggregation\":5,\"showAnnotations\":true,\"noDataMessage\":\"No data available. 
Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"timechart\",\"graphSettings\":{\"type\":0,\"topContent\":{\"columnMatch\":\"ControlId\",\"formatter\":1},\"centerContent\":{\"columnMatch\":\"WeightedAvgPerControl\",\"formatter\":1,\"numberFormat\":{\"unit\":17,\"options\":{\"maximumSignificantDigits\":3,\"maximumFractionDigits\":2}}}},\"chartSettings\":{\"group\":\"ControlName\",\"createOtherGroup\":0,\"showLegend\":true,\"ySettings\":{\"numberFormatSettings\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":true}},\"min\":0,\"max\":100}}},\"customWidth\":\"50\",\"name\":\"Controls\"}]},\"conditionalVisibilities\":[{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"},{\"parameterName\":\"ErrorHandle\",\"comparison\":\"isNotEqualTo\",\"value\":\"1\"}],\"name\":\"group - 8\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\" Resources changed over time\\r\\n \\r\\n Select a recommendation to see its changes\"},\"name\":\"text - 2\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Recommendations changes over time (count how many resources have been changed to unhealthy, healthy, and not applicable, per recommendation) \\r\\nlet unhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'Unhealthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet healthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'Healthy' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet notApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'NotApplicable' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notUnhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Unhealthy' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notHealthy = SecurityRecommendation\\r\\n| extend SubscriptionId 
= iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Healthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notNotApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'NotApplicable' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notHealthyToHealthy = \\r\\n notHealthy \\r\\n | join (\\r\\n healthy\\r\\n ) on RecommendationId, AssessedResourceId \\r\\n | where TimeGenerated < TimeGenerated1 \\r\\n | summarize ToHealthyCount = count(), RecommendationName=any(RecommendationName) by RecommendationId;\\r\\nlet notUnhealthyToUnhealthy = \\r\\n notUnhealthy \\r\\n | join ( \\r\\n unhealthy\\r\\n ) on RecommendationId, AssessedResourceId \\r\\n | where TimeGenerated < TimeGenerated1 \\r\\n | summarize ToUnhealthyCount = count(), RecommendationName=any(RecommendationName) by RecommendationId;\\r\\nlet notNotApplicableToNotApplicable = \\r\\n notNotApplicable \\r\\n | join (\\r\\n notApplicable\\r\\n ) on RecommendationId, AssessedResourceId \\r\\n | where TimeGenerated < TimeGenerated1 \\r\\n | summarize ToNotApplicableCount = count(), RecommendationName=any(RecommendationName) by RecommendationId;\\r\\n// Union \\r\\nunion notHealthyToHealthy, notUnhealthyToUnhealthy, notNotApplicableToNotApplicable\\r\\n| summarize RecommendationName=any(RecommendationName), ToUnhealthyCount = sum(ToUnhealthyCount), ToHealthyCount = sum(ToHealthyCount), ToNotApplicableCount = sum(ToNotApplicableCount) by RecommendationId\\r\\n| order by ToUnhealthyCount desc\",\"size\":0,\"noDataMessage\":\"No data available. 
Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"exportedParameters\":[{\"fieldName\":\"RecommendationId\",\"parameterName\":\"RecommendationId\"},{\"fieldName\":\"RecommendationName\",\"parameterName\":\"RecommendationName\",\"parameterType\":1}],\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"RecommendationId\",\"formatter\":5},{\"columnMatch\":\"RecommendationName\",\"formatter\":7,\"formatOptions\":{\"linkTarget\":\"Url\",\"bladeOpenContext\":{\"bladeName\":\"RecommendationsBlade\",\"extensionName\":\"Microsoft_Azure_Security\",\"bladeParameters\":[{\"name\":\"assessmentKey\",\"source\":\"column\",\"value\":\"RecommendationId\"}]},\"customColumnWidthSetting\":\"100ch\"}},{\"columnMatch\":\"UnhealthyCount\",\"formatter\":8,\"formatOptions\":{\"palette\":\"redBright\"}},{\"columnMatch\":\"HealthyCount\",\"formatter\":8,\"formatOptions\":{\"palette\":\"green\"}},{\"columnMatch\":\"NotApplicableCount\",\"formatter\":8,\"formatOptions\":{\"palette\":\"gray\"}},{\"columnMatch\":\"AssessedResourceId\",\"formatter\":13,\"formatOptions\":{\"linkTarget\":null,\"showIcon\":true}}],\"labelSettings\":[{\"columnId\":\"RecommendationName\",\"label\":\"Recommendation name\"},{\"columnId\":\"ToUnhealthyCount\",\"label\":\"To unhealthy\"},{\"columnId\":\"ToHealthyCount\",\"label\":\"To healthy\"},{\"columnId\":\"ToNotApplicableCount\",\"label\":\"To not applicable\"}]},\"sortBy\":[]},\"name\":\"RecommendationStatusChanges\"},{\"type\":1,\"content\":{\"json\":\"To view changes over time on a specific recommendation, please select any from the list above.\",\"style\":\"info\"},\"conditionalVisibility\":{\"parameterName\":\"RecommendationId\",\"comparison\":\"isEqualTo\"},\"name\":\"ChangeLogBanner\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let unhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'Unhealthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId\\r\\n| project UnhealthyRecommendationId = RecommendationId, UnhealthyResourceId = AssessedResourceId, UnhealhyTime = TimeGenerated, tostring(SubscriptionId);\\r\\nlet notApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'NotApplicable'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId\\r\\n| project NARecommendationId = RecommendationId, NAResourceId = AssessedResourceId, NATime = TimeGenerated, tostring(SubscriptionId);\\r\\nlet healthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == 
'{selectedSubscription}'\\r\\n| where RecommendationState == 'Healthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId\\r\\n| project HealthyRecommendationId = RecommendationId, HealthyResourceId = AssessedResourceId, HealthyTime = TimeGenerated, tostring(SubscriptionId);\\r\\nlet NotHealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Healthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet NotUnhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Unhealthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet NotNotApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'NotApplicable'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet 1_to_Healthy = \\r\\n    NotHealthy\\r\\n    | extend originalState = RecommendationState\\r\\n    | join healthy on $left.RecommendationId == $right.HealthyRecommendationId, $left.AssessedResourceId == $right.HealthyResourceId\\r\\n    | where TimeGenerated < HealthyTime\\r\\n    | extend update = \\\"To healthy\\\"\\r\\n    | project RecommendationId, RecommendationName, Description, OriginalState = RecommendationState, update, TimeGenerated, RecommendationSeverity, tostring(SubscriptionId), AssessedResourceId, RecommendationLink;\\r\\n//1_to_Healthy\\r\\nlet 2_to_Unhealthy = \\r\\n    NotUnhealthy\\r\\n    | extend originalState = RecommendationState\\r\\n    | join unhealthy on $left.RecommendationId == $right.UnhealthyRecommendationId, $left.AssessedResourceId == $right.UnhealthyResourceId\\r\\n    | where TimeGenerated < UnhealthyTime\\r\\n    | extend update = \\\"To unhealthy\\\"\\r\\n    | project RecommendationId, RecommendationName, Description, OriginalState = RecommendationState, update, TimeGenerated, RecommendationSeverity, tostring(SubscriptionId), AssessedResourceId, RecommendationLink;\\r\\n//2_to_Unhealthy\\r\\nlet 3_to_NotApplicable = \\r\\n    NotNotApplicable\\r\\n    | extend originalState = RecommendationState\\r\\n    | join notApplicable on $left.RecommendationId == $right.NARecommendationId, $left.AssessedResourceId == $right.NAResourceId\\r\\n    | where TimeGenerated < NATime\\r\\n    | extend update = \\\"To not applicable\\\"\\r\\n    | extend NotApplicableReason = iff(isempty(NotApplicableReason), \\\"NA\\\", NotApplicableReason)\\r\\n    | project RecommendationId, RecommendationName, Description, OriginalState = RecommendationState, update, TimeGenerated, 
RecommendationSeverity, tostring(SubscriptionId), AssessedResourceId, RecommendationLink, NotApplicableReason;\\r\\n// JOIN\\r\\nunion 1_to_Healthy, 2_to_Unhealthy, 3_to_NotApplicable\\r\\n| extend FullRecommendationLink = strcat(\\\"http://\\\",RecommendationLink)\\r\\n| extend AssessedResourceId = iff(AssessedResourceId==\\\"N/A\\\", extract(\\\".*onPremiseMachines/(.+)\\\",1, url_decode(RecommendationLink)), AssessedResourceId)\\r\\n| project-away RecommendationLink\\r\\n| where RecommendationId == '{RecommendationId}'\",\"size\":0,\"title\":\"Changes for \\\"{RecommendationName}\\\"\",\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"showExportToExcel\":true,\"exportToExcelOptions\":\"all\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"RecommendationId\",\"formatter\":5},{\"columnMatch\":\"RecommendationName\",\"formatter\":5},{\"columnMatch\":\"Description\",\"formatter\":5},{\"columnMatch\":\"SubscriptionId\",\"formatter\":15,\"formatOptions\":{\"linkTarget\":null,\"showIcon\":true}},{\"columnMatch\":\"NotApplicableReason\",\"formatter\":0,\"formatOptions\":{\"customColumnWidthSetting\":\"30ch\"}},{\"columnMatch\":\"FullRecommendationLink\",\"formatter\":7,\"formatOptions\":{\"linkTarget\":\"Url\",\"linkLabel\":\"View\",\"linkIsContextBlade\":false}}],\"rowLimit\":1000,\"hierarchySettings\":{\"treeType\":1,\"groupBy\":[\"update\"]},\"labelSettings\":[{\"columnId\":\"RecommendationName\",\"label\":\"Recommendation name\"},{\"columnId\":\"OriginalState\",\"label\":\"Original state\"},{\"columnId\":\"update\",\"label\":\"Updated state\"},{\"columnId\":\"TimeGenerated\",\"label\":\"Time of change\"},{\"columnId\":\"RecommendationSeverity\",\"label\":\"Severity\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription\"},{\"columnId\":\"AssessedResourceId\",\"label\":\"Resource\"},{\"columnId\":\"NotApplicableReason\",\"label\":\"Reason\"},{\"columnId\":\"FullRecommendationLink\",\"label\":\"View recommendation\"}]},\"sortBy\":[]},\"conditionalVisibility\":{\"parameterName\":\"RecommendationId\",\"comparison\":\"isNotEqualTo\"},\"name\":\"ChangeLogDetails\"}]},\"conditionalVisibilities\":[{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"},{\"parameterName\":\"ErrorHandle\",\"comparison\":\"isNotEqualTo\",\"value\":\"1\"}],\"name\":\"ChangeLog\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"title\":\"Machines not sending current heartbeats\",\"items\":[{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-15m)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 15 minutes\",\"noDataMessage\":\"No machines found not reporting for more than 15 minutes.\",\"timeContext\":{\"durationMs\":86400000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription 
ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 0\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-24h)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 24 hours\",\"noDataMessage\":\"No machines found not reporting for more than 24 hours.\",\"timeContext\":{\"durationMs\":172800000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 1\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-48h)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 48 hours\",\"noDataMessage\":\"No machines found not reporting for more than 48 hours.\",\"timeContext\":{\"durationMs\":604800000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 2\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-7d)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 7 days\",\"noDataMessage\":\"No machines found not reporting for more than 7 days.\",\"timeContext\":{\"durationMs\":2592000000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 3\",\"styleSettings\":{\"showBorder\":true}}]},\"conditionalVisibilities\":[{\"parameterName\":\"SelectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"NotReportingTab\"},{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"}],\"name\":\"MachinesNotReporting\",\"styleSettings\":{\"showBorder\":true}},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"title\":\"Protection Status\",\"items\":[{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"securityresources\\n| where type =~ \\\"microsoft.security/assessments\\\" or type =~ \\\"microsoft.security/softwareInventories\\\"\\n| extend assessmentStatusCode = 
case(type =~ \\\"microsoft.security/assessments\\\", tostring(properties.status.code), \\\"\\\")\\n| extend severity = case(assessmentStatusCode =~ \\\"unhealthy\\\", tolower(tostring(properties.metadata.severity)), tolower(assessmentStatusCode))\\n| extend exemptionType = case(tolower(type) != \\\"microsoft.security/assessments\\\",\\\"N/A\\\", case(properties.status.cause =~ \\\"exempt\\\", \\\"Yes\\\", \\\"No\\\"))\\n| extend source = case(type =~ \\\"microsoft.security/assessments\\\", tostring(properties.resourceDetails.Source), \\\"\\\")\\n| extend resourceId = trim(\\\" \\\", tolower(tostring(case(source =~ \\\"azure\\\", properties.resourceDetails.Id,\\n source =~ \\\"aws\\\" and isnotempty(tostring(properties.resourceDetails.ConnectorId)), properties.resourceDetails.Id,\\n source =~ \\\"aws\\\", properties.resourceDetails.AzureResourceId,\\n source =~ \\\"gcp\\\", properties.resourceDetails.AzureResourceId,\\n type =~ \\\"microsoft.security/assessments\\\", extract(\\\"^(.+)/providers/Microsoft.Security/assessments/.+$\\\",1,id),extract(\\\"^(.+)/providers/Microsoft.Security/softwareInventories/.+$\\\",1,id)))))\\n| extend resourceName = iff(source =~ \\\"aws\\\" and isnotempty(tostring(properties.resourceDetails.ConnectorId)), tostring(properties.additionalData.ResourceName), extract(@\\\"(.+)/(.+)\\\", 2, resourceId))\\n| extend regexResourceId = extract_all(@\\\"/providers/([^/]+)(?:/([^/]+)/[^/]+(?:/([^/]+)/[^/]+)?)?/([^/]+)/[^/]+$\\\", resourceId)\\n| extend RegexResourceType = regexResourceId[0]\\n| extend mainType = RegexResourceType[1], extendedType = RegexResourceType[2], resourceType = RegexResourceType[3]\\n| extend providerName = RegexResourceType[0],\\n mainType = case(mainType !~ \\\"\\\", strcat(\\\"/\\\",mainType), \\\"\\\"),\\n extendedType = case(extendedType!~ \\\"\\\", strcat(\\\"/\\\",extendedType), \\\"\\\"),\\n resourceType = case(resourceType!~ \\\"\\\", strcat(\\\"/\\\",resourceType), \\\"\\\")\\n| extend array = split(resourceId, '/')\\n| extend typeFullPath = case(\\n array_length(array) == 3, 'subscription',\\n array_length(array) == 5, 'resourcegroups',\\n source =~ \\\"aws\\\" and isnotempty(tostring(properties.resourceDetails.ConnectorId)), tolower(strcat(providerName, mainType, \\\"/\\\", tostring(properties.additionalData.ResourceProvider), tostring(properties.additionalData.ResourceType))),\\n strcat(providerName, mainType, extendedType, resourceType))\\n| extend resourceType = case(typeFullPath =~ 'resourcegroups' or typeFullPath =~ 'subscription', typeFullPath, tolower(trim(\\\"/\\\", resourceType)))\\n| extend assessmentKey = case(type =~ \\\"microsoft.security/assessments\\\", tostring(name), \\\"\\\")\\n| extend softwareVendorName = case(type =~ \\\"microsoft.security/softwareInventories\\\", tostring(properties.vendor), \\\"\\\")\\n| extend softwareName = case(type =~ \\\"microsoft.security/softwareInventories\\\", tostring(properties.softwareName), \\\"\\\")\\n| extend softwareVersion = case(type =~ \\\"microsoft.security/softwareInventories\\\", tostring(properties.version), \\\"\\\")\\n| extend softwareNameIdentifier = case(type =~ \\\"microsoft.security/softwareInventories\\\", strcat(softwareVendorName, \\\",\\\", softwareName, \\\",\\\", softwareVersion), \\\"\\\")\\n| extend environment = case(type =~ \\\"microsoft.security/assessments\\\", properties.resourceDetails[\\\"Source\\\"], \\\"\\\")\\n| extend environment = case(environment =~ \\\"onpremise\\\", tolower(\\\"Non-Azure\\\"), tolower(environment))\\n| extend osTypeProperty = 
properties.additionalData[\\\"OS Type\\\"]\\n| extend osType = case(isnotempty(osTypeProperty), osTypeProperty, \\\"\\\")\\n| extend hasAgent = case(assessmentKey == \\\"d1db3318-01ff-16de-29eb-28b344515626\\\" or assessmentKey == \\\"45cfe080-ceb1-a91e-9743-71551ed24e94\\\" or assessmentKey == \\\"720a3e77-0b9a-4fa9-98b6-ddf0fd7e32c1\\\" or assessmentKey == \\\"27ac71b1-75c5-41c2-adc2-858f5db45b08\\\", assessmentStatusCode, \\\"\\\")\\n| extend hasAgent = case(assessmentKey == \\\"4ab6e3c5-74dd-8b35-9ab9-f61b30875b27\\\" or assessmentKey == \\\"181ac480-f7c4-544b-9865-11b8ffe87f47\\\" or assessmentKey == \\\"4fb67663-9ab9-475d-b026-8c544cced439\\\" , \\\"healthy\\\", hasAgent)\\n| extend workspaceAzureResourceId = case(hasAgent !~ \\\"\\\", properties.additionalData[\\\"Reporting workspace azure id\\\"], \\\"\\\")\\n| extend workspaceName = case(workspaceAzureResourceId !~ \\\"\\\", extract(@\\\"(.+)/(.+)\\\", 2, workspaceAzureResourceId), \\\"\\\")\\n| extend assessmentDisplayName = case(type =~ \\\"microsoft.security/assessments\\\", case(isnotempty(properties.displayName), properties.displayName, properties.metadata.displayName), \\\"\\\")\\n| extend assessmentIdentifier = case(type =~ \\\"microsoft.security/assessments\\\", strcat(assessmentKey, \\\",\\\" , assessmentDisplayName, \\\",\\\", severity), \\\"\\\")\\n| summarize assessmentsCount = count() , assessmentsIdentifier = make_list(assessmentIdentifier), softwareNamesIdentifier = make_list(softwareNameIdentifier), hasAgent = max(hasAgent), workspaceName = max(workspaceName), environment = max(environment), osType = max(osType), exemptionType = max(exemptionType) by resourceId, subscriptionId, resourceName, resourceType, typeFullPath, severity\\n| extend packAssessments = pack(severity, assessmentsCount)\\n| summarize assessmentsSummary = make_bag(packAssessments), assessmentsIdentifier = make_set(assessmentsIdentifier), softwareNamesIdentifier = make_set(softwareNamesIdentifier), hasAgent = max(hasAgent), workspaceName= max(workspaceName), environment = max(environment), osType= max(osType), exemptionType = max(exemptionType) by resourceId, subscriptionId, resourceName, resourceType, typeFullPath\\n| extend agentMonitoring = case(hasAgent =~ \\\"NotApplicable\\\" or hasAgent =~ \\\"\\\", '',\\n hasAgent =~ \\\"Unhealthy\\\", \\\"notInstalled\\\",\\n \\\"installed\\\")\\n| join kind=leftouter (\\n securityresources\\n | where type =~ \\\"microsoft.security/pricings\\\"\\n | project subscriptionId, bundleName = tolower(name), freeTrialRemainingTime = properties.freeTrialRemainingTime, pricingTier = tolower(properties.pricingTier)\\n | extend bundlesPricing = pack(bundleName, pricingTier)\\n | summarize subscriptionPricing = make_bag(bundlesPricing) by subscriptionId\\n ) on subscriptionId\\n| extend hasNoSoftwareData = case(array_length(softwareNamesIdentifier) == 1, case(set_has_element(softwareNamesIdentifier, \\\"\\\"), true, false), false)\\n| extend softwareNamesIdentifier = case(hasNoSoftwareData, softwareNamesIdentifier, set_difference(softwareNamesIdentifier, pack_array(\\\"\\\")))\\n| extend AssessmentsHigh = case(isnull(assessmentsSummary.high), 0 , toint(assessmentsSummary.high))\\n| extend AssessmentsMedium = case(isnull(assessmentsSummary.medium), 0 , toint(assessmentsSummary.medium))\\n| extend AssessmentsLow = case(isnull(assessmentsSummary.low), 0 , toint(assessmentsSummary.low))\\n| extend unhealthyAssessmentsCount = AssessmentsHigh + AssessmentsMedium + AssessmentsLow\\n| extend virtualmachines = 
case(isnull(subscriptionPricing), '' , subscriptionPricing.virtualmachines)\\n| extend virtualmachines = case(virtualmachines == 'free', 'off', 'on')\\n| extend sqlservers = case(isnull(subscriptionPricing), '' , subscriptionPricing.sqlservers)\\n| extend sqlservers = case(sqlservers == 'free', 'off', 'on')\\n| extend kubernetesservice = case(isnull(subscriptionPricing), '' , subscriptionPricing.kubernetesservice)\\n| extend kubernetesservice = case(kubernetesservice == 'free', 'off', 'on')\\n| extend containerregistry = case(isnull(subscriptionPricing), '' , subscriptionPricing.containerregistry)\\n| extend containerregistry = case(containerregistry == 'free', 'off', 'on')\\n| extend connectedcontainerregistry = case(isnull(subscriptionPricing), '' , subscriptionPricing.connectedcontainerregistry)\\n| extend connectedcontainerregistry = case(connectedcontainerregistry == 'free', 'off', 'on')\\n| extend sqlservervirtualmachines = case(isnull(subscriptionPricing), '' , subscriptionPricing.sqlservervirtualmachines)\\n| extend sqlservervirtualmachines = case(sqlservervirtualmachines == 'free', 'off', 'on')\\n| extend appservices = case(isnull(subscriptionPricing), '' , subscriptionPricing.appservices)\\n| extend appservices = case(appservices == 'free', 'off', 'on')\\n| extend storageaccounts = case(isnull(subscriptionPricing), '' , subscriptionPricing.storageaccounts)\\n| extend storageaccounts = case(storageaccounts == 'free', 'off', 'on')\\n| extend keyvaults = case(isnull(subscriptionPricing), '' , subscriptionPricing.keyvaults)\\n| extend keyvaults = case(keyvaults == 'free', 'off', 'on')\\n| extend opensourcerelationaldatabases = case(isnull(subscriptionPricing), '' , subscriptionPricing.opensourcerelationaldatabases)\\n| extend opensourcerelationaldatabases = case(opensourcerelationaldatabases == 'free', 'off', 'on')\\n| extend calculatedSubscriptionPricing = case(resourceType =~ \\\"subscription\\\" and isempty(subscriptionPricing) == false , iff(subscriptionPricing has \\\"free\\\" and subscriptionPricing has \\\"standard\\\", \\\"partial\\\", iff(subscriptionPricing has \\\"free\\\", \\\"off\\\", \\\"on\\\")), \\\"\\\")\\n| extend resourcePricing = case(typeFullPath =~ \\\"microsoft.classiccompute/virtualmachines\\\", virtualmachines, typeFullPath =~ \\\"microsoft.compute/virtualmachines\\\", virtualmachines, typeFullPath =~ \\\"microsoft.hybridcompute/machines\\\", virtualmachines, typeFullPath =~ \\\"microsoft.sql/servers\\\", sqlservers, typeFullPath =~ \\\"microsoft.containerservice/managedclusters\\\", kubernetesservice, typeFullPath =~ \\\"microsoft.kubernetes/connectedclusters\\\", kubernetesservice, typeFullPath =~ \\\"microsoft.containerregistry/registries\\\", containerregistry, typeFullPath =~ \\\"microsoft.security/connectedcontainerregistries\\\", connectedcontainerregistry, typeFullPath =~ \\\"microsoft.sqlvirtualmachine/sqlvirtualmachines\\\", sqlservervirtualmachines, typeFullPath =~ \\\"microsoft.web/sites\\\", appservices, typeFullPath =~ \\\"microsoft.storage/storageaccounts\\\", storageaccounts, typeFullPath =~ \\\"microsoft.compute/virtualmachinescalesets\\\", virtualmachines, typeFullPath =~ \\\"microsoft.keyvault/vaults\\\", keyvaults, typeFullPath =~ \\\"microsoft.dbforpostgresql/servers\\\", opensourcerelationaldatabases, typeFullPath =~ \\\"microsoft.dbformysql/servers\\\", opensourcerelationaldatabases, typeFullPath =~ \\\"microsoft.dbformariadb/servers\\\", opensourcerelationaldatabases, calculatedSubscriptionPricing)\\n| extend pricing = 
case(resourceType =~ \\\"subscription\\\" , calculatedSubscriptionPricing , resourcePricing)\\n| extend selectedSoftware = \\\"\\\"\\n| project resourceType, exemptionType, typeFullPath, resourceId, resourceName, subscriptionId, environment, osType, workspaceName, agentMonitoring, assessmentsIdentifier, assessmentsSummary, subscriptionPricing, unhealthyAssessmentsCount, pricing, softwareNamesIdentifier, selectedSoftware\\n| extend resourceGroup = tolower(tostring(split(resourceId, \\\"/\\\")[4]))\\n| order by unhealthyAssessmentsCount, subscriptionId, resourceType, resourceId\\n| where typeFullPath in ('microsoft.compute/virtualmachines', 'microsoft.hybridcompute/machines')\\n| where isnotempty(resourceId)\",\"size\":3,\"showAnalytics\":true,\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"value::all\"],\"visualization\":\"table\",\"showExpandCollapseGrid\":true,\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"resourceType\",\"formatter\":5},{\"columnMatch\":\"exemptionType\",\"formatter\":5},{\"columnMatch\":\"typeFullPath\",\"formatter\":5},{\"columnMatch\":\"resourceName\",\"formatter\":5},{\"columnMatch\":\"agentMonitoring\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"installed\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"Unhealthy\",\"representation\":\"2\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"NotApplicable\",\"representation\":\"cancelled\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"notInstalled\",\"representation\":\"disabled\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Disable\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"assessmentsIdentifier\",\"formatter\":5},{\"columnMatch\":\"assessmentsSummary\",\"formatter\":5},{\"columnMatch\":\"subscriptionPricing\",\"formatter\":5},{\"columnMatch\":\"unhealthyAssessmentsCount\",\"formatter\":3,\"formatOptions\":{\"min\":0,\"max\":15,\"palette\":\"greenRed\"}},{\"columnMatch\":\"pricing\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"on\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"off\",\"representation\":\"disabled\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"disabled\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"softwareNamesIdentifier\",\"formatter\":5},{\"columnMatch\":\"selectedSoftware\",\"formatter\":5},{\"columnMatch\":\"resourceGroup\",\"formatter\":5}],\"rowLimit\":1000,\"filter\":true,\"sortBy\":[{\"itemKey\":\"$gen_link_resourceId_3\",\"sortOrder\":2}],\"labelSettings\":[{\"columnId\":\"exemptionType\",\"label\":\"Resource Exemption exists\"},{\"columnId\":\"resourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"subscriptionId\",\"label\":\"Subscription ID\"},{\"columnId\":\"environment\",\"label\":\"Environment\"},{\"columnId\":\"osType\",\"label\":\"OS Type\"},{\"columnId\":\"workspaceName\",\"label\":\"Workspace Name\"},{\"columnId\":\"agentMonitoring\",\"label\":\"Log Analytics agent status\"},{\"columnId\":\"unhealthyAssessmentsCount\",\"label\":\"Open Recommendations\"},{\"columnId\":\"pricing\",\"label\":\"Azure Defender 
status\"}]},\"sortBy\":[{\"itemKey\":\"$gen_link_resourceId_3\",\"sortOrder\":2}]},\"name\":\"query - 0\",\"styleSettings\":{\"showBorder\":true}}]},\"conditionalVisibility\":{\"parameterName\":\"SelectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"SecurityTab\"},\"name\":\"protectionStatus\",\"styleSettings\":{\"showBorder\":true}}]},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"Security\"},\"name\":\"Security\"}],\"isLocked\":false,\"fallbackResourceIds\":[\"workbookresourceid-stage\"]}",
+ "serializedData": "{\"version\":\"Notebook/1.0\",\"items\":[{\"type\":1,\"content\":{\"json\":\"# Jumpstart ArcBox Workbook DataOps\\r\\n\\r\\nKeep track of your ArcBox resources by selecting one of the tabs below:\\r\\n____________________________________________________________________________________________________\\r\\n\"},\"name\":\"text - 3\"},{\"type\":11,\"content\":{\"version\":\"LinkItem/1.0\",\"style\":\"tabs\",\"links\":[{\"id\":\"001e53e2-be76-428e-9081-da7ce60368d4\",\"cellValue\":\"selectedTab\",\"linkTarget\":\"parameter\",\"linkLabel\":\"Inventory\",\"subTarget\":\"Inventory\",\"style\":\"link\"},{\"id\":\"547c6d68-f351-4898-bd7f-de56cd1ea984\",\"cellValue\":\"selectedTab\",\"linkTarget\":\"parameter\",\"linkLabel\":\"Monitoring\",\"subTarget\":\"Monitoring\",\"style\":\"link\"},{\"id\":\"942dd542-ac90-4ee4-bb5d-477c931c05b4\",\"cellValue\":\"selectedTab\",\"linkTarget\":\"parameter\",\"linkLabel\":\"Security\",\"subTarget\":\"Security\",\"style\":\"link\"}]},\"customWidth\":\"100\",\"name\":\"links - 7\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\"\"},\"name\":\"text - 4\"},{\"type\":1,\"content\":{\"json\":\"## Jumpstart ArcBox Metrics and Alerts\\r\\n\\r\\n💡 Select your Azure ArcBox subscription and Resource Group to see more information.\"},\"name\":\"text - 1\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{subscriptionId}\"],\"parameters\":[{\"id\":\"1f74ed9a-e3ed-498d-bd5b-f68f3836a117\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscriptionId\",\"label\":\"Subscriptions\",\"type\":6,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"includeAll\":true,\"showDefault\":false}},{\"id\":\"b616a3a3-4271-4208-b1a9-a92a78efed08\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resourceGroup\",\"label\":\"Resource groups\",\"type\":2,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"Resources\\r\\n| summarize by resourceGroup\\r\\n| order by resourceGroup asc\\r\\n| project id=resourceGroup, resourceGroup\",\"crossComponentResources\":[\"{subscriptionId}\"],\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"*\",\"showDefault\":false},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"0e85e0e4-a7e8-4ea8-b291-e444c317843a\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"ResourceTypes\",\"label\":\"Resource types\",\"type\":7,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"includeAll\":true,\"selectAllValue\":\"*\",\"showDefault\":false}},{\"id\":\"f60ea0a0-3703-44ca-a59b-df0246423f41\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"Resources\",\"type\":5,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"Resources\\r\\n| where \\\"*\\\" in ({ResourceTypes}) or type in~({ResourceTypes})\\r\\n| where '*' in~({resourceGroup}) or resourceGroup in~({resourceGroup}) \\r\\n| order by name asc\\r\\n| extend Rank = row_number()\\r\\n| project value = id, label = name, selected = Rank <= 10, group = 
resourceGroup\",\"crossComponentResources\":[\"{subscriptionId}\"],\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"*\",\"showDefault\":false},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"015d1a5e-357f-4e01-ac77-598e7b493db0\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"timeRange\",\"label\":\"Time Range\",\"type\":4,\"isRequired\":true,\"value\":{\"durationMs\":3600000},\"typeSettings\":{\"selectableValues\":[{\"durationMs\":300000},{\"durationMs\":900000},{\"durationMs\":1800000},{\"durationMs\":3600000},{\"durationMs\":14400000},{\"durationMs\":43200000},{\"durationMs\":86400000},{\"durationMs\":172800000},{\"durationMs\":259200000},{\"durationMs\":604800000},{\"durationMs\":1209600000},{\"durationMs\":2419200000},{\"durationMs\":2592000000}],\"allowCustom\":true}},{\"id\":\"bd6d6075-dc8f-43d3-829f-7e2245a3eb21\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"State\",\"type\":2,\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"{\\\"version\\\":\\\"1.0.0\\\",\\\"content\\\":\\\"[ \\\\r\\\\n {\\\\\\\"id\\\\\\\":\\\\\\\"New\\\\\\\", \\\\\\\"label\\\\\\\": \\\\\\\"New\\\\\\\"},\\\\r\\\\n {\\\\\\\"id\\\\\\\":\\\\\\\"Acknowledged\\\\\\\", \\\\\\\"label\\\\\\\": \\\\\\\"Acknowledged\\\\\\\"},\\\\r\\\\n {\\\\\\\"id\\\\\\\":\\\\\\\"Closed\\\\\\\", \\\\\\\"label\\\\\\\": \\\\\\\"Closed\\\\\\\"}\\\\r\\\\n]\\\",\\\"transformers\\\":null}\",\"crossComponentResources\":[\"{Subscription}\"],\"value\":[\"value::all\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"*\",\"showDefault\":false},\"queryType\":8}],\"style\":\"above\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},\"name\":\"parameters\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"AlertsManagementResources | where type =~ 'microsoft.alertsmanagement/alerts'\\r\\n| where todatetime(properties.essentials.startDateTime) {timeRange} \\r\\n| where \\\"*\\\" in ({resourceGroup}) or properties.essentials.targetResourceGroup in~ ({resourceGroup})\\r\\n| where \\\"*\\\" in ({ResourceTypes}) or properties.essentials.targetResourceType in~ ({ResourceTypes})\\r\\n| where \\\"*\\\" in ({Resources}) or properties.essentials.targetResource in~ ({Resources})\\r\\n| extend State=tostring(properties.essentials.alertState)\\r\\n| where \\\"*\\\" in ({State}) or State in ({State})\\r\\n| summarize Count=count(), New=countif(State==\\\"New\\\"), \\r\\nAcknowledged=countif(State==\\\"Acknowledged\\\"), \\r\\nClosed=countif(State==\\\"Closed\\\") \\r\\nby Severity=tostring(properties.essentials.severity)\\r\\n| order by Severity asc\",\"size\":3,\"title\":\"Alert Summary\",\"noDataMessage\":\"No alerts found\",\"exportMultipleValues\":true,\"exportedParameters\":[{\"fieldName\":\"Severity\",\"parameterName\":\"Severity\",\"parameterType\":1}],\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"{subscriptionId}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"Severity\",\"formatter\":11},{\"columnMatch\":\"Count\",\"formatter\":3,\"formatOptions\":{\"min\":0,\"palette\":\"blue\",\"aggregation\":\"Sum\"},\"numberFormat\":{\"unit\":17,\"options\":{\"style\":\"decimal\",\"maximumFractionDigits\":2}}},{\"columnMatch\":\"State\",\"formatter\":1}]}},\"showPin\":true,\"name\":\"query - 6\"},{\"type\":1,\"content\":{\"json\":\"## Azure Arc-enabled Kubernetes\"},\"name\":\"text - 
9\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{resource}\"],\"parameters\":[{\"id\":\"e2b5cd30-7276-477f-a6bb-07da25ba5e5f\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"timeRange\",\"label\":\"Time Range\",\"type\":4,\"description\":\"Filter data by time range\",\"isRequired\":true,\"value\":{\"durationMs\":7776000000},\"typeSettings\":{\"selectableValues\":[{\"durationMs\":300000},{\"durationMs\":900000},{\"durationMs\":1800000},{\"durationMs\":3600000},{\"durationMs\":14400000},{\"durationMs\":43200000},{\"durationMs\":86400000},{\"durationMs\":172800000},{\"durationMs\":259200000},{\"durationMs\":604800000},{\"durationMs\":1209600000},{\"durationMs\":2419200000},{\"durationMs\":2592000000},{\"durationMs\":5184000000},{\"durationMs\":7776000000}],\"allowCustom\":true}},{\"id\":\"b8b76ad0-de1a-4b7c-90a8-f4eb277bb878\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscription\",\"label\":\"Subscription\",\"type\":6,\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"6b8d59ca-08c5-40fb-9962-5061b3e6e779\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workspaceName\",\"label\":\"Log Analytics Workspace\",\"type\":5,\"query\":\"resources\\r\\n| where type contains 'microsoft.operationalinsights/workspaces'\\r\\n| project id\",\"crossComponentResources\":[\"{subscription}\"],\"value\":\"\",\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"7aa94d19-4c5b-40e2-b14f-e29736a8f90c\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resource\",\"label\":\"Azure Arc-enabled K8S cluster\",\"type\":5,\"query\":\" Resources\\r\\n | where type =~ 'microsoft.kubernetes/connectedclusters'\\r\\n | project id\",\"crossComponentResources\":[\"{subscription}\"],\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"value\":\"\"},{\"id\":\"3a3fdabe-6173-4e2b-8658-38c0195fd7e2\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resourceType\",\"type\":7,\"isRequired\":true,\"query\":\"{\\\"version\\\":\\\"1.0.0\\\",\\\"content\\\":\\\"\\\\\\\"{resource:resourcetype}\\\\\\\"\\\",\\\"transformers\\\":null}\",\"typeSettings\":{\"additionalResourceOptions\":[\"value::1\"],\"showDefault\":false},\"defaultValue\":\"value::1\",\"queryType\":8},{\"id\":\"9767de49-ba31-4847-9ffc-714c02e7523c\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"clusterId\",\"type\":1,\"description\":\"Filter workspace by cluster id\",\"isHiddenWhenLocked\":true,\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"cba109cf-db6e-4261-8d3a-fe038593622d\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"clusterIdWhereClause\",\"type\":1,\"description\":\"Add to queries to filter by cluster id\",\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"criteriaContext\":{\"leftOperand\":\"resourceType\",\"operator\":\"contains\",\"rightValType\":\"static\",\"rightVal\":\"microsoft.operationalinsights/workspaces\",\"resultValType\":\"static\",\"resultVal\":\"| where ClusterId =~ 
'{clusterId}'\"}},{\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"ee080bd8-83dc-4fa0-b688-b2f16b956b92\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadType\",\"label\":\"Workload Type\",\"type\":2,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n| distinct ControllerKind\\r\\n| where isempty(ControllerKind) == false\\r\\n| order by ControllerKind asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::all\",\"queryType\":0,\"resourceType\":\"microsoft.kubernetes/connectedclusters\"},{\"id\":\"cf611d4b-aa93-4949-a7a1-c1d174af29ca\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadKindWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"condition\":\"if (workloadType is not empty ), result = '| where ControllerKind in ({workloadType})'\",\"criteriaContext\":{\"leftOperand\":\"workloadType\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where ControllerKind in ({workloadType})\"}},{\"condition\":\"else result = '| where \\\"a\\\" == \\\"a\\\"'\",\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"034caae5-bee3-4b66-8f80-c120a2a25c77\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"namespace\",\"label\":\"Namespace\",\"type\":2,\"description\":\"Filter the workbook by namespace\",\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n| distinct Namespace\\r\\n| where isnotempty(Namespace)\\r\\n| order by Namespace asc\",\"crossComponentResources\":[\"{Workspace}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"},{\"id\":\"faeee248-e4c3-4fae-b435-ef5fb6dabe3b\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"namespaceWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"condition\":\"if (namespace is not empty ), result = '| where Namespace in ({namespace})'\",\"criteriaContext\":{\"leftOperand\":\"namespace\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where Namespace in ({namespace})\"}},{\"condition\":\"else result = '| where \\\"a\\\" == \\\"a\\\"'\",\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"8943e259-1dde-44cd-a00b-e815eea9de34\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadName\",\"label\":\"Workload Name\",\"type\":2,\"description\":\"Filter the data for a particular 
workload\",\"isRequired\":true,\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{namespaceWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n| distinct ControllerName\\r\\n| where isnotempty(ControllerName)\\r\\n| order by ControllerName asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::1\"],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::1\",\"queryType\":0,\"resourceType\":\"microsoft.kubernetes/connectedclusters\"},{\"id\":\"00a9be6c-ab0b-400b-b195-9775a47ecddd\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podStatus\",\"label\":\"Pod Status\",\"type\":2,\"description\":\"Filter by Pod status like Pending/Running/Failed etc.\",\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n| where ControllerName == '{workloadName}'\\r\\n| distinct PodStatus\\r\\n| where isnotempty(PodStatus)\\r\\n| order by PodStatus asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::all\",\"queryType\":0,\"resourceType\":\"{resourceType}\",\"value\":[\"value::all\"]},{\"id\":\"388ea6aa-12d8-485a-8e80-b4d7b8994bd8\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podStatusWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"criteriaContext\":{\"leftOperand\":\"podStatus\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where PodStatus in ({podStatus})\"}},{\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":2592000000},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"64de23e6-96b5-4105-b65d-36e40f73f4ec\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podName\",\"label\":\"Pod Name\",\"type\":2,\"description\":\"Filter by pod name \",\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n| where ControllerName == '{workloadName:value}'\\r\\n{podStatusWhereClause}\\r\\n| summarize arg_max(TimeGenerated, PodStatus) by Name\\r\\n| project Name\\r\\n| where isempty(Name) == false\\r\\n| order by Name asc\",\"crossComponentResources\":[\"{resource}\"],\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"selectAllValue\":\"\",\"showDefault\":false},\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\",\"defaultValue\":\"value::all\",\"queryType\":0,\"resourceType\":\"{resourceType}\",\"value\":[\"value::all\"]},{\"id\":\"4f7059c2-ebd7-4fc2-86c4-c51e66703582\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podNameWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"condition\":\"if (podName is not empty ), result = '| where PodName in ({podName})'\",\"criteriaContext\":{\"leftOperand\":\"podName\",\"operator\":\"isNotNull\",\"rightValType\":\"static\",\"rightVal\":\"unset\",\"resultValType\":\"static\",\"resultVal\":\"| where PodName in ({podName})\"}},{\"condition\":\"else result = '| where \\\"a\\\" == 
\\\"a\\\"'\",\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"e60298ff-36da-485e-acea-73c0692b8446\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadNamespaceText\",\"type\":1,\"description\":\"For displaying name space of the selected workload\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where ControllerName == '{workloadName}'\\r\\n| summarize Namespaces=make_set(Namespace)\\r\\n| extend Namespaces = strcat_array(Namespaces, ', ')\",\"crossComponentResources\":[\"{resource}\"],\"isHiddenWhenLocked\":true,\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"{resourceType}\"},{\"id\":\"9f8d0d65-d7bc-42c9-bc5c-b394288b5216\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workloadTypeText\",\"type\":1,\"description\":\"For displaying workload type of the selected workload\",\"query\":\"KubePodInventory\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n| where ControllerName == '{workloadName}'\\r\\n| summarize ControllerKinds=make_set(ControllerKind)\\r\\n| extend ControllerKinds = strcat_array(ControllerKinds, ', ')\",\"crossComponentResources\":[\"{resource}\"],\"isHiddenWhenLocked\":true,\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"{resourceType}\"}],\"style\":\"above\",\"queryType\":0,\"resourceType\":\"microsoft.kubernetes/connectedclusters\"},\"name\":\"pills\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"parameters\":[{\"id\":\"55cc0c6d-51df-4e58-9543-c8b21bc71e29\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"podTileStatusWhereClause\",\"type\":1,\"isHiddenWhenLocked\":true,\"criteriaData\":[{\"criteriaContext\":{\"leftOperand\":\"podStatusTileText\",\"operator\":\"!=\",\"rightValType\":\"static\",\"rightVal\":\"All\",\"resultValType\":\"static\",\"resultVal\":\"| where PodStatus == '{podStatusTileText}'\"}},{\"criteriaContext\":{\"operator\":\"Default\",\"rightValType\":\"param\",\"resultValType\":\"static\",\"resultVal\":\"| where \\\"a\\\" == \\\"a\\\"\"}}],\"timeContext\":{\"durationMs\":14400000},\"timeContextFromParameter\":\"timeRange\"}],\"style\":\"pills\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"asas\"},\"name\":\"pod-status-tile-text\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let endDateTime = {timeRange:end};\\r\\nlet startDateTime = {timeRange:start};\\r\\nlet trendBinSize = {timeRange:grain};\\r\\nlet controllerName= '{workloadName}';\\r\\nKubePodInventory\\r\\n| where TimeGenerated >= startDateTime\\r\\n| where TimeGenerated < endDateTime\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where isnotempty(ClusterName)\\r\\n| where isnotempty(Namespace)\\r\\n| where ControllerName == controllerName\\r\\n| extend PodName = Name\\r\\n{podStatusWhereClause}\\r\\n{podTileStatusWhereClause}\\r\\n{podNameWhereClause}\\r\\n| summarize PodRestartCount=max(PodRestartCount) by PodName, bin(TimeGenerated, trendBinSize)\\r\\n| order by PodName asc nulls last, TimeGenerated asc\\r\\n| serialize \\r\\n| extend prevValue=iif(prev(PodName) == PodName, 
prev(PodRestartCount), PodRestartCount)\\r\\n| extend RestartCount=PodRestartCount - prevValue\\r\\n| extend RestartCount=iif(RestartCount < 0, 0, RestartCount) \\r\\n| project TimeGenerated, PodName, RestartCount\\r\\n| render timechart\",\"size\":0,\"aggregation\":5,\"showAnalytics\":true,\"title\":\"Azure Arc-enabled kubernetes - Pod Restart Trend\",\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"]},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"pod-restart-trend-chart\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let endDateTime = {timeRange:end};\\r\\nlet startDateTime = {timeRange:start};\\r\\nlet trendBinSize = {timeRange:grain};\\r\\nlet controllerName= '{workloadName:value}';\\r\\nKubePodInventory\\r\\n| where TimeGenerated >= startDateTime\\r\\n| where TimeGenerated < endDateTime\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where isnotempty(ClusterName)\\r\\n| where isnotempty(Namespace)\\r\\n| where ControllerName == controllerName\\r\\n| extend PodName = Name\\r\\n{podStatusWhereClause}\\r\\n{podTileStatusWhereClause}\\r\\n{podNameWhereClause}\\r\\n| extend ContainerName=tostring(split(ContainerName, '/')[1])\\r\\n| where isempty(ContainerName) == false\\r\\n| summarize ContainerRestartCount=sum(ContainerRestartCount) by ContainerName, bin(TimeGenerated, 1tick)\\r\\n| order by ContainerName asc nulls last, TimeGenerated asc\\r\\n| serialize \\r\\n| extend prevValue=iif(prev(ContainerName) == ContainerName, prev(ContainerRestartCount), ContainerRestartCount)\\r\\n| extend RestartCount=ContainerRestartCount - prevValue\\r\\n| extend RestartCount=iif(RestartCount < 0, 0, RestartCount) \\r\\n| project TimeGenerated, ContainerName, RestartCount\\r\\n| summarize RestartCount=sum(RestartCount) by ContainerName, bin(TimeGenerated, trendBinSize)\",\"size\":0,\"aggregation\":5,\"showAnalytics\":true,\"title\":\"Azure Arc-enabled kubernetes - Container restart trend\",\"timeContextFromParameter\":\"timeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"timechart\"},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"container-restart-trend-chart\",\"styleSettings\":{\"showBorder\":true}},{\"type\":10,\"content\":{\"chartId\":\"workbook3e0e301c-50cf-4e53-ac2a-40f5eed823a0\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":3600000},\"metrics\":[{\"namespace\":\"insights.container/pods\",\"metric\":\"insights.container/pods--PodCount\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled kubernetes - Pod Count\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 
19\"},{\"type\":10,\"content\":{\"chartId\":\"workbook167c4490-9cde-4fcd-be0f-401070f13ccd\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":3600000},\"metrics\":[{\"namespace\":\"insights.container/pods\",\"metric\":\"insights.container/pods--PodReadyPercentage\",\"aggregation\":4,\"splitBy\":null},{\"namespace\":\"insights.container/pods\",\"metric\":\"insights.container/pods--restartingContainerCount\",\"aggregation\":4}],\"title\":\"Azure Arc-enabled Kubernetes - Pod status\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 20\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let endDateTime = {timeRange:end};\\r\\nlet startDateTime = {timeRange:start};\\r\\nlet trendBinSize = {timeRange:grain};\\r\\nlet controllerName= '{workloadName}';\\r\\nKubePodInventory\\r\\n| where TimeGenerated >= startDateTime\\r\\n| where TimeGenerated < endDateTime\\r\\n{clusterIdWhereClause}\\r\\n{workloadKindWhereClause}\\r\\n{namespaceWhereClause}\\r\\n| where isnotempty(ClusterName)\\r\\n| where isnotempty(Namespace)\\r\\n| extend PodName = Name\\r\\n{podStatusWhereClause}\\r\\n{podNameWhereClause}\\r\\n| where ControllerName == controllerName\\r\\n| extend InstanceName = strcat(ClusterId, '/', ContainerName),\\r\\n ContainerName = strcat(Name, '/', tostring(split(ContainerName, '/')[1]))\\r\\n| summarize arg_max(TimeGenerated, *) by ContainerName, Name\\r\\n{podTileStatusWhereClause}\\r\\n| extend ContainerLastStatus = todynamic(ContainerLastStatus) \\r\\n| project TimeGenerated, ContainerName, PodStatus, ContainerStatus, LastState=ContainerLastStatus.lastState, LastStateReason=ContainerLastStatus.reason, LastStateStartTime=ContainerLastStatus.startedAt,\\r\\nLastStateFinishTime=ContainerLastStatus.finishedAt\\r\\n\",\"size\":0,\"aggregation\":5,\"showAnalytics\":true,\"title\":\"Azure Arc-enabled kubernetes - Container Status for 
Pods\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"TimeGenerated\",\"formatter\":6,\"formatOptions\":{},\"dateFormat\":{\"showUtcTime\":null,\"formatName\":\"shortDateTimePattern\"}},{\"columnMatch\":\"PodStatus\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"Running\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"Pending\",\"representation\":\"pending\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"Failed\",\"representation\":\"failed\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Blank\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"ContainerStatus\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"Running\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"waiting\",\"representation\":\"pending\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"success\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"LastState\",\"formatter\":0,\"formatOptions\":{},\"numberFormat\":{\"unit\":0,\"options\":{\"style\":\"decimal\"},\"emptyValCustomText\":\"-\"}},{\"columnMatch\":\"LastStateReason\",\"formatter\":0,\"formatOptions\":{},\"numberFormat\":{\"unit\":0,\"options\":{\"style\":\"decimal\"},\"emptyValCustomText\":\"-\"}},{\"columnMatch\":\"LastStateStartTime\",\"formatter\":6,\"formatOptions\":{},\"dateFormat\":{\"showUtcTime\":null,\"formatName\":\"shortDateTimePattern\"}},{\"columnMatch\":\"LastStateFinishTime\",\"formatter\":6,\"formatOptions\":{},\"dateFormat\":{\"showUtcTime\":null,\"formatName\":\"shortDateTimePattern\"}}]},\"sortBy\":[]},\"showPin\":true,\"name\":\"container-status-for-pods-chart\"},{\"type\":10,\"content\":{\"chartId\":\"workbook87327d65-b260-4473-9f2b-5d90b1100543\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":2592000000},\"metrics\":[{\"namespace\":\"insights.container/nodes\",\"metric\":\"insights.container/nodes--cpuUsagePercentage\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled kubernetes cluster - Node CPU usage %\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 17\"},{\"type\":10,\"content\":{\"chartId\":\"workbook2f202f95-1281-4077-a49b-31c3e3d3271b\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.kubernetes/connectedclusters\",\"metricScope\":0,\"resourceParameter\":\"resource\",\"resourceIds\":[\"{resource}\"],\"timeContext\":{\"durationMs\":3600000},\"metrics\":[{\"namespace\":\"insights.container/nodes\",\"metric\":\"insights.container/nodes--memoryWorkingSetPercentage\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled kubernetes cluster - Node memory working set %\",\"gridSettings\":{\"rowLimit\":10000}},\"customWidth\":\"50\",\"name\":\"metric - 18\"},{\"type\":1,\"content\":{\"json\":\"## Azure Arc-enabled SQL Managed Instance\"},\"name\":\"text - 
18\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{subscription}\"],\"parameters\":[{\"id\":\"be802690-79de-4708-8629-4c57b0d78085\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscriptionId\",\"label\":\"Subscription\",\"type\":6,\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\"},{\"id\":\"3bd2e749-7c3f-47fd-9f8a-7ab118be8850\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workspaceName\",\"label\":\"Log Analytics Workspace\",\"type\":5,\"query\":\"resources\\r\\n| where type contains 'microsoft.operationalinsights/workspaces'\\r\\n| project id\",\"crossComponentResources\":[\"{subscription}\"],\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"a308510f-a9f5-4ee4-a4b1-a175aa96b290\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"SQLMI\",\"label\":\"Azure Arc-enabled SQL MI\",\"type\":5,\"query\":\" Resources\\r\\n | where type =~ 'Microsoft.AzureArcData/sqlManagedInstances'\\r\\n | project id\",\"crossComponentResources\":[\"{subscription}\"],\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":0},\"timeContextFromParameter\":\"timeRange\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"}],\"style\":\"above\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},\"name\":\"pills - Copy\"},{\"type\":10,\"content\":{\"chartId\":\"789dd9d3-afbc-4440-8e31-7fe124f7b9ce\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.azurearcdata/sqlmanagedinstances\",\"metricScope\":0,\"resourceParameter\":\"SQLMI\",\"resourceIds\":[\"{SQLMI}\"],\"timeContext\":{\"durationMs\":86400000},\"metrics\":[{\"namespace\":\"sql server\",\"metric\":\"sql server--CPU Usage: pod-0\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled SQL MI - CPU usage\",\"gridSettings\":{\"rowLimit\":10000}},\"name\":\"metric - 0\"},{\"type\":10,\"content\":{\"chartId\":\"540fb39f-7903-4cc8-af49-679ee1f331fe\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.azurearcdata/sqlmanagedinstances\",\"metricScope\":0,\"resourceParameter\":\"SQLMI\",\"resourceIds\":[\"{SQLMI}\"],\"timeContext\":{\"durationMs\":86400000},\"metrics\":[{\"namespace\":\"sql server\",\"metric\":\"sql server--Memory Usage: pod-0\",\"aggregation\":4,\"splitBy\":null}],\"gridSettings\":{\"rowLimit\":10000}},\"name\":\"metric - 0\"},{\"type\":10,\"content\":{\"chartId\":\"31b401d2-6d90-4a57-a61a-d6e458523448\",\"version\":\"MetricsItem/2.0\",\"size\":0,\"chartType\":2,\"resourceType\":\"microsoft.azurearcdata/sqlmanagedinstances\",\"metricScope\":0,\"resourceParameter\":\"SQLMI\",\"resourceIds\":[\"{SQLMI}\"],\"timeContext\":{\"durationMs\":86400000},\"metrics\":[{\"namespace\":\"sql server\",\"metric\":\"sql server--Transactions/second: pod-0\",\"aggregation\":4,\"splitBy\":null}],\"title\":\"Azure Arc-enabled SQL MI - Transactions per Second\",\"gridSettings\":{\"rowLimit\":10000}},\"name\":\"metric - 
0\"}]},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"Monitoring\"},\"name\":\"Monitoring\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\"## Jumpstart ArcBox resource inventory\\r\\n\\r\\n💡 Select your Azure ArcBox subscription and Resource Group to see more information.\"},\"name\":\"text - 4\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{subscriptionId}\"],\"parameters\":[{\"id\":\"984514df-fff0-434c-a373-7090566e8c44\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"subscriptionId\",\"type\":6,\"value\":null,\"typeSettings\":{\"additionalResourceOptions\":[],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":86400000},\"label\":\"Subscription\"},{\"id\":\"cb849a6b-937d-4e93-8d09-770554777009\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"resourceGroup\",\"label\":\"Resource Group\",\"type\":2,\"query\":\"Resources\\r\\n| summarize by resourceGroup\\r\\n| order by resourceGroup asc\\r\\n| project id=resourceGroup, resourceGroup\",\"crossComponentResources\":[\"{subscriptionId}\"],\"value\":\"arcboxdataops\",\"typeSettings\":{\"additionalResourceOptions\":[],\"showDefault\":false},\"timeContext\":{\"durationMs\":86400000},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},{\"id\":\"0fd9f40f-ffe0-4894-adc7-64866aa4b1e4\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"ResourceType\",\"label\":\"Resources\",\"type\":7,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"typeSettings\":{\"additionalResourceOptions\":[\"value::all\"],\"includeAll\":true,\"showDefault\":false},\"timeContext\":{\"durationMs\":86400000},\"value\":[\"value::all\"]}],\"style\":\"pills\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\"},\"name\":\"parameters - 1\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Resources \\r\\n| where resourceGroup == \\\"{resourceGroup}\\\"\\r\\n| extend type = case(\\r\\ntype contains 'microsoft.netapp/netappaccounts', 'NetApp Accounts',\\r\\ntype contains \\\"microsoft.compute\\\", \\\"Azure Compute\\\",\\r\\ntype contains \\\"microsoft.logic\\\", \\\"LogicApps\\\",\\r\\ntype contains 'microsoft.keyvault/vaults', \\\"Key Vaults\\\",\\r\\ntype contains 'microsoft.storage/storageaccounts', \\\"Storage Accounts\\\",\\r\\ntype contains 'microsoft.compute/availabilitysets', 'Availability Sets',\\r\\ntype contains 'microsoft.operationalinsights/workspaces', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.operationsmanagement', 'Operations Management Resources',\\r\\ntype contains 'microsoft.insights', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.desktopvirtualization/applicationgroups', 'WVD Application Groups',\\r\\ntype contains 'microsoft.desktopvirtualization/workspaces', 'WVD Workspaces',\\r\\ntype contains 'microsoft.desktopvirtualization/hostpools', 'WVD Hostpools',\\r\\ntype contains 'microsoft.recoveryservices/vaults', 'Backup Vaults',\\r\\ntype contains 'microsoft.web', 'App Services',\\r\\ntype contains 'microsoft.managedidentity/userassignedidentities','Managed Identities',\\r\\ntype contains 'microsoft.storagesync/storagesyncservices', 'Azure File Sync',\\r\\ntype contains 'microsoft.hybridcompute/machines', 'Azure Arc-enabled servers ',\\r\\ntype contains 'Microsoft.EventHub', 'Event Hub',\\r\\ntype contains 'Microsoft.EventGrid', 'Event Grid',\\r\\ntype contains 
'Microsoft.Sql', 'SQL Resources',\\r\\ntype contains 'Microsoft.HDInsight/clusters', 'HDInsight Clusters',\\r\\ntype contains 'microsoft.devtestlab', 'DevTest Labs Resources',\\r\\ntype contains 'microsoft.containerinstance', 'Container Instances Resources',\\r\\ntype contains 'microsoft.portal/dashboards', 'Azure Dashboards',\\r\\ntype contains 'microsoft.containerregistry/registries', 'Container Registry',\\r\\ntype contains 'microsoft.automation', 'Automation Resources',\\r\\ntype contains 'sendgrid.email/accounts', 'SendGrid Accounts',\\r\\ntype contains 'microsoft.datafactory/factories', 'Data Factory',\\r\\ntype contains 'microsoft.databricks/workspaces', 'Databricks Workspaces',\\r\\ntype contains 'microsoft.machinelearningservices/workspaces', 'Machine Learning Workspaces',\\r\\ntype contains 'microsoft.alertsmanagement/smartdetectoralertrules', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.apimanagement/service', 'API Management Services',\\r\\ntype contains 'microsoft.dbforpostgresql', 'PostgreSQL Resources',\\r\\ntype contains 'microsoft.scheduler/jobcollections', 'Scheduler Job Collections',\\r\\ntype contains 'microsoft.visualstudio/account', 'Azure DevOps Organization',\\r\\ntype contains 'microsoft.network/', 'Network Resources',\\r\\ntype contains 'microsoft.migrate/' or type contains 'microsoft.offazure', 'Azure Migrate Resources',\\r\\ntype contains 'microsoft.servicebus/namespaces', 'Service Bus Namespaces',\\r\\ntype contains 'microsoft.classic', 'ASM Obsolete Resources',\\r\\ntype contains 'microsoft.resources/templatespecs', 'Template Spec Resources',\\r\\ntype contains 'microsoft.virtualmachineimages', 'VM Image Templates',\\r\\ntype contains 'microsoft.documentdb', 'Cosmos DB Resources',\\r\\ntype contains 'microsoft.alertsmanagement/actionrules', 'Azure Monitor Resources',\\r\\ntype contains 'microsoft.kubernetes/connectedclusters', 'Azure Arc-enabled Kubernetes',\\r\\ntype contains 'microsoft.purview', 'Purview Resources',\\r\\ntype contains 'microsoft.security', 'Security Resources',\\r\\ntype contains 'microsoft.cdn', 'CDN Resources',\\r\\ntype contains 'microsoft.devices','IoT Resources',\\r\\ntype contains 'microsoft.datamigration', 'Data Migration Services',\\r\\ntype contains 'microsoft.cognitiveservices', 'Cognitive Services',\\r\\ntype contains 'microsoft.customproviders', 'Custom Providers',\\r\\ntype contains 'microsoft.appconfiguration', 'App Services',\\r\\ntype contains 'microsoft.search', 'Search Services',\\r\\ntype contains 'microsoft.maps', 'Maps',\\r\\ntype contains 'microsoft.containerservice/managedclusters', 'AKS',\\r\\ntype contains 'microsoft.signalrservice', 'SignalR',\\r\\ntype contains 'microsoft.resourcegraph/queries', 'Resource Graph Queries',\\r\\ntype contains 'microsoft.batch', 'MS Batch',\\r\\ntype contains 'microsoft.analysisservices', 'Analysis Services',\\r\\ntype contains 'microsoft.synapse/workspaces', 'Synapse Workspaces',\\r\\ntype contains 'microsoft.synapse/workspaces/sqlpools', 'Synapse SQL Pools',\\r\\ntype contains 'microsoft.kusto/clusters', 'ADX Clusters',\\r\\ntype contains 'microsoft.resources/deploymentscripts', 'Deployment Scripts',\\r\\ntype contains 'microsoft.aad/domainservices', 'AD Domain Services',\\r\\ntype contains 'microsoft.labservices/labaccounts', 'Lab Accounts',\\r\\ntype contains 'microsoft.automanage/accounts', 'Automanage Accounts',\\r\\ntype contains 'microsoft.extendedlocation/customlocations', 'Azure Arc Custom Locations',\\r\\ntype contains 
'microsoft.azurearcdata/postgresinstances', 'Azure Arc-enabled PostgreSQL',\\r\\ntype contains 'microsoft.azurearcdata/sqlmanagedinstances', 'Azure Arc-enabled SQL Managed Instance',\\r\\ntype contains 'microsoft.azurearcdata/datacontrollers', 'Azure Arc-enabled data controller',\\r\\ntype contains 'microsoft.azurearcdata/sqlserverinstances', 'Azure Arc-enabled SQL Server',\\r\\nstrcat(\\\"Not Translated: \\\", type))\\r\\n| summarize count() by type\",\"size\":1,\"title\":\"Resource Count by Type\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"{subscriptionId}\"],\"visualization\":\"tiles\",\"tileSettings\":{\"titleContent\":{\"columnMatch\":\"type\",\"formatter\":1},\"leftContent\":{\"columnMatch\":\"count_\",\"formatter\":12,\"formatOptions\":{\"palette\":\"auto\"},\"numberFormat\":{\"unit\":17,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumFractionDigits\":2,\"maximumSignificantDigits\":3}}},\"showBorder\":true,\"sortCriteriaField\":\"count_\",\"sortOrderField\":2}},\"name\":\"query - Overview Resource Counts by type\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Resources\\r\\n| where type in~ ({ResourceType})\\r\\n| where resourceGroup == \\\"{resourceGroup}\\\"\\r\\n| project Resource = id, Subscription = subscriptionId, ['Resource group'] = strcat('/subscriptions/', subscriptionId, '/resourceGroups/', resourceGroup), Location = location, tags\",\"size\":2,\"title\":\"Resources List\",\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"{subscriptionId}\"]},\"name\":\"query - 2\"}]},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"Inventory\"},\"name\":\"Inventory\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\"## Jumpstart ArcBox security overview\"},\"name\":\"text - 5\"},{\"type\":1,\"content\":{\"json\":\"💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\\r\\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\\r\\n2. Select the specific Azure subscription for which you want to configure the data export.\\r\\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\\r\\n4. Set the export target to **Log Analytics workspace**.\\r\\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\\r\\n6. From the export frequency options, select **Streaming** and **Snapshots**.\\r\\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. 
Select Save.\\r\\n\\r\\n[Learn more](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\\r\\n\\r\\n> **Notes**\\r\\n* To get full visibility, wait at least one week for the first snapshot to be exported.\\r\\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export).\"},\"showPin\":false,\"name\":\"Instructions\"},{\"type\":9,\"content\":{\"version\":\"KqlParameterItem/1.0\",\"crossComponentResources\":[\"{workspaceName}\"],\"parameters\":[{\"id\":\"ae721cb1-e030-4e02-8839-9c6a00f66c8a\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"workspaceName\",\"type\":5,\"description\":\"Select at least one workspace that contains continuous export data based on the selected subscriptions\",\"isRequired\":true,\"multiSelect\":true,\"quote\":\"'\",\"delimiter\":\",\",\"query\":\"resources\\r\\n| where type =~ 'microsoft.operationalinsights/workspaces'\\r\\n| project id\",\"crossComponentResources\":[\"value::selected\"],\"value\":[\"value::all\"],\"typeSettings\":{\"resourceTypeFilter\":{\"microsoft.operationalinsights/workspaces\":true},\"additionalResourceOptions\":[\"value::all\"],\"showDefault\":false},\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"label\":\"Log Analytics Workspace\"},{\"id\":\"4f3a03fd-9968-4ee7-b6bc-d04d3bbe14a8\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"TimeRange\",\"label\":\"Time Range\",\"type\":4,\"description\":\"Filter the data of this report to one of these predefined time ranges\",\"isRequired\":true,\"value\":{\"durationMs\":2592000000},\"typeSettings\":{\"selectableValues\":[{\"durationMs\":259200000},{\"durationMs\":604800000},{\"durationMs\":1209600000},{\"durationMs\":2419200000},{\"durationMs\":2592000000}],\"allowCustom\":true}},{\"id\":\"0117bdc3-a4e2-476b-b7cc-3d1f486e67cf\",\"version\":\"KqlParameterItem/1.0\",\"name\":\"ErrorHandle\",\"type\":1,\"query\":\"let MissingTable = view () { print isMissing=1 };\\r\\nunion isfuzzy=true MissingTable, (SecureScores | getschema | summarize c=count() | project isMissing=iff(c > 0, 0, 1))\\r\\n| top 1 by isMissing asc\",\"crossComponentResources\":[\"{workspaceName}\"],\"isHiddenWhenLocked\":true,\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"}],\"style\":\"above\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\"},\"name\":\"Parameters\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\" Current score trends per subscription (not affected by the time range parameter)\\r\\n\"},\"customWidth\":\"50\",\"name\":\"text - 3\"},{\"type\":1,\"content\":{\"json\":\" Aggregated score for selected subscriptions over time\\r\\n\"},\"customWidth\":\"50\",\"name\":\"text - 4\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Current score trends per subscription (show current, 7-day change from the current score as %, 30-day change from the current score as %)\\r\\nlet startOfToday = startofday(now()); \\r\\nlet offsetWeek = 6d; \\r\\nlet offsetMonth = 29d; \\r\\nlet lookbackDurationWeek = 14d; \\r\\nlet lookbackDurationMonth = 45d; \\r\\nlet endTimeWeek = startOfToday - offsetWeek; \\r\\nlet startTimeWeek = endTimeWeek - lookbackDurationWeek; \\r\\nlet endTimeMonth = startOfToday - 
offsetMonth; \\r\\nlet startTimeMonth = endTimeMonth - lookbackDurationMonth; \\r\\nSecureScores \\r\\n| extend Day = startofday(TimeGenerated) \\r\\n| summarize arg_max(TimeGenerated, *) by Day, SecureScoresSubscriptionId \\r\\n| summarize arg_max(Day, *) by SecureScoresSubscriptionId \\r\\n| join kind = fullouter( \\r\\n SecureScores \\r\\n | extend Day = startofday(TimeGenerated) \\r\\n | where TimeGenerated > startTimeWeek and TimeGenerated <= endTimeWeek \\r\\n | summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId \\r\\n | project OldScoreSevenDays = PercentageScore, SecureScoresSubscriptionId \\r\\n ) \\r\\n on SecureScoresSubscriptionId \\r\\n| join kind = fullouter( \\r\\n SecureScores \\r\\n | extend Day = startofday(TimeGenerated) \\r\\n | where TimeGenerated > startTimeMonth and TimeGenerated <= endTimeMonth \\r\\n | summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId \\r\\n | project OldMonthScore = PercentageScore, SecureScoresSubscriptionId \\r\\n ) \\r\\n on SecureScoresSubscriptionId \\r\\n| extend DiffSevenDays = tostring(((PercentageScore - OldScoreSevenDays) / OldScoreSevenDays) * 100) \\r\\n| extend DiffSevenDays = iff(isempty(DiffSevenDays), \\\"\\\", DiffSevenDays) \\r\\n| extend DiffMonth = tostring(((PercentageScore - OldMonthScore) / OldMonthScore) * 100) \\r\\n| extend DiffMonth = iff(isempty(DiffMonth), \\\"\\\", DiffMonth) \\r\\n| project SecureScoresSubscriptionId, CurrentScore = PercentageScore * 100, todouble(DiffSevenDays), todouble(DiffMonth)\",\"size\":0,\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"exportFieldName\":\"SecureScoresSubscriptionId\",\"exportParameterName\":\"selectedSubscription\",\"exportDefaultValue\":\"All\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"SecureScoresSubscriptionId\",\"formatter\":15,\"formatOptions\":{\"linkTarget\":null,\"showIcon\":true,\"customColumnWidthSetting\":\"25ch\"}},{\"columnMatch\":\"CurrentScore\",\"formatter\":4,\"formatOptions\":{\"min\":0,\"max\":100,\"palette\":\"redGreen\",\"customColumnWidthSetting\":\"20ch\"},\"numberFormat\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumSignificantDigits\":2}}},{\"columnMatch\":\"DiffSevenDays\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"<\",\"thresholdValue\":\"0\",\"representation\":\"trenddown\",\"text\":\"{0}{1}\"},{\"operator\":\">\",\"thresholdValue\":\"0\",\"representation\":\"trendup\",\"text\":\"{0}{1}\"},{\"operator\":\"is Empty\",\"thresholdValue\":\"0\",\"representation\":\"Normal\",\"text\":\"N/A\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Blank\",\"text\":\"{0}{1}\"}],\"customColumnWidthSetting\":\"20ch\"},\"numberFormat\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumSignificantDigits\":2}}},{\"columnMatch\":\"DiffMonth\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"<\",\"thresholdValue\":\"0\",\"representation\":\"trenddown\",\"text\":\"{0}{1}\"},{\"operator\":\">\",\"thresholdValue\":\"0\",\"representation\":\"trendup\",\"text\":\"{0}{1}\"},{\"operator\":\"is 
Empty\",\"thresholdValue\":\"0\",\"representation\":\"Normal\",\"text\":\"N/A\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Blank\",\"text\":\"{0}{1}\"}],\"customColumnWidthSetting\":\"20ch\"},\"numberFormat\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":false,\"maximumSignificantDigits\":2}}}],\"rowLimit\":500,\"sortBy\":[{\"itemKey\":\"$gen_link_SecureScoresSubscriptionId_0\",\"sortOrder\":1}],\"labelSettings\":[{\"columnId\":\"SecureScoresSubscriptionId\",\"label\":\"Subscription name\"},{\"columnId\":\"CurrentScore\",\"label\":\"Current score %\"},{\"columnId\":\"DiffSevenDays\",\"label\":\"7-day change\"},{\"columnId\":\"DiffMonth\",\"label\":\"30-day change\"}]},\"sortBy\":[{\"itemKey\":\"$gen_link_SecureScoresSubscriptionId_0\",\"sortOrder\":1}]},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"ScoreTrends\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Aggregated score for all subscriptions over time\\r\\nSecureScores\\r\\n| where '{selectedSubscription}' == 'All' or SecureScoresSubscriptionId == '{selectedSubscription}'\\r\\n| where MaxScore>0\\r\\n| extend subscriptionScore = CurrentScore/MaxScore \\r\\n| extend subScoreXsubWeight = subscriptionScore*Weight \\r\\n| extend Day = startofday(TimeGenerated) \\r\\n| summarize upperValue = sum(subScoreXsubWeight), underValue = sum(todouble(Weight)) by Day\\r\\n| extend OverallScore = 100*((upperValue)/(underValue))\\r\\n| project OverallScore, Day\",\"size\":0,\"aggregation\":5,\"showAnnotations\":true,\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"timeBrushParameterName\":\"TimeRange\",\"timeBrushExportOnlyWhenBrushed\":true,\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"timechart\",\"chartSettings\":{\"seriesLabelSettings\":[{\"seriesName\":\"overallScore\",\"label\":\"Overall Score\",\"color\":\"lightBlue\"}],\"ySettings\":{\"min\":0,\"max\":100}}},\"customWidth\":\"50\",\"showPin\":true,\"name\":\"ScoreOvertime\"}],\"exportParameters\":true},\"conditionalVisibilities\":[{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"},{\"parameterName\":\"ErrorHandle\",\"comparison\":\"isNotEqualTo\",\"value\":\"1\"}],\"name\":\"SecureScore\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\" Top recommendations with recent increase in unhealthy resources\\r\\n \\r\\n Recommendations with the most resources that have become unhealthy in the periods shown\"},\"customWidth\":\"50\",\"name\":\"UnhealthyRecommendations\"},{\"type\":1,\"content\":{\"json\":\" Security controls scores over time (weekly)\\r\\n\\r\\n\\r\\n\"},\"customWidth\":\"50\",\"name\":\"text - 3\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Top recommendations with recent increase in unhealthy resources\\r\\nSecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState =~ \\\"Unhealthy\\\"\\r\\n| summarize UnhealthyAssessedResources = dcount(AssessedResourceId),RecommendationName = any(RecommendationName) by RecommendationId\\r\\n| project 
RecommendationName, UnhealthyAssessedResources\\r\\n| sort by UnhealthyAssessedResources desc\\r\\n| take 10\",\"size\":0,\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"table\",\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"RecommendationName\",\"formatter\":0,\"formatOptions\":{\"customColumnWidthSetting\":\"70ch\"},\"numberFormat\":{\"unit\":0,\"options\":{\"style\":\"decimal\",\"useGrouping\":false}},\"tooltipFormat\":{\"tooltip\":\"View recommendation '{0}'\"}},{\"columnMatch\":\"UnhealthyAssessedResources\",\"formatter\":4,\"formatOptions\":{\"min\":0,\"palette\":\"blue\",\"compositeBarSettings\":{\"labelText\":\"\",\"columnSettings\":[]},\"customColumnWidthSetting\":\"25ch\"}},{\"columnMatch\":\"RecommendationId\",\"formatter\":5}],\"labelSettings\":[{\"columnId\":\"RecommendationName\",\"label\":\"Recommendation name\"},{\"columnId\":\"UnhealthyAssessedResources\",\"label\":\"Unhealthy count\"}]},\"tileSettings\":{\"showBorder\":false,\"titleContent\":{\"columnMatch\":\"RecommendationName\",\"formatter\":1},\"leftContent\":{\"columnMatch\":\"UnhealthyCount\",\"formatter\":12,\"formatOptions\":{\"palette\":\"auto\"},\"numberFormat\":{\"unit\":17,\"options\":{\"maximumSignificantDigits\":3,\"maximumFractionDigits\":2}}}},\"graphSettings\":{\"type\":0,\"topContent\":{\"columnMatch\":\"RecommendationName\",\"formatter\":1},\"centerContent\":{\"columnMatch\":\"UnhealthyCount\",\"formatter\":1,\"numberFormat\":{\"unit\":17,\"options\":{\"maximumSignificantDigits\":3,\"maximumFractionDigits\":2}}}}},\"customWidth\":\"50\",\"name\":\"query - 7\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Security controls score over time (weekly) \\r\\nlet subscriptionsWeight = \\r\\n SecureScores\\r\\n | where '{selectedSubscription}' == 'All' or SecureScoresSubscriptionId == '{selectedSubscription}'\\r\\n | summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId \\r\\n | project SecureScoresSubscriptionId, SubscriptionWeight = Weight; \\r\\nSecureScoreControls \\r\\n| where MaxScore > 0\\r\\n| where IsSnapshot == true\\r\\n| extend Week = startofweek(TimeGenerated) \\r\\n| summarize arg_max(TimeGenerated, *) by SecureScoresSubscriptionId, ControlId, Week \\r\\n| join kind=inner(\\r\\n subscriptionsWeight\\r\\n ) on SecureScoresSubscriptionId \\r\\n| extend WeightedControlScore = PercentageScore * SubscriptionWeight \\r\\n| summarize WeightedScoreAvg = sum(WeightedControlScore)/sum(SubscriptionWeight)*100, ControlName = any(ControlName) by ControlId, Week\\r\\n| order by WeightedScoreAvg desc\",\"size\":0,\"aggregation\":5,\"showAnnotations\":true,\"noDataMessage\":\"No data available. 
Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"visualization\":\"timechart\",\"graphSettings\":{\"type\":0,\"topContent\":{\"columnMatch\":\"ControlId\",\"formatter\":1},\"centerContent\":{\"columnMatch\":\"WeightedAvgPerControl\",\"formatter\":1,\"numberFormat\":{\"unit\":17,\"options\":{\"maximumSignificantDigits\":3,\"maximumFractionDigits\":2}}}},\"chartSettings\":{\"group\":\"ControlName\",\"createOtherGroup\":0,\"showLegend\":true,\"ySettings\":{\"numberFormatSettings\":{\"unit\":1,\"options\":{\"style\":\"decimal\",\"useGrouping\":true}},\"min\":0,\"max\":100}}},\"customWidth\":\"50\",\"name\":\"Controls\"}]},\"conditionalVisibilities\":[{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"},{\"parameterName\":\"ErrorHandle\",\"comparison\":\"isNotEqualTo\",\"value\":\"1\"}],\"name\":\"group - 8\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"items\":[{\"type\":1,\"content\":{\"json\":\" Resources changed over time\\r\\n \\r\\n Select a recommendation to see its changes\"},\"name\":\"text - 2\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"//Recommendations changes over time (count how many resources have been changed to unhealthy, healthy, and not applicable, per recommendation) \\r\\nlet unhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'Unhealthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet healthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'Healthy' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet notApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'NotApplicable' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notUnhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Unhealthy' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notHealthy = SecurityRecommendation\\r\\n| extend SubscriptionId 
= iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Healthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notNotApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'NotApplicable' \\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName) \\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId; \\r\\nlet notHealthyToHealthy = \\r\\n notHealthy \\r\\n | join (\\r\\n healthy\\r\\n ) on RecommendationId, AssessedResourceId \\r\\n | where TimeGenerated < TimeGenerated1 \\r\\n | summarize ToHealthyCount = count(), RecommendationName=any(RecommendationName) by RecommendationId;\\r\\nlet notUnhealthyToUnhealthy = \\r\\n notUnhealthy \\r\\n | join ( \\r\\n unhealthy\\r\\n ) on RecommendationId, AssessedResourceId \\r\\n | where TimeGenerated < TimeGenerated1 \\r\\n | summarize ToUnhealthyCount = count(), RecommendationName=any(RecommendationName) by RecommendationId;\\r\\nlet notNotApplicableToNotApplicable = \\r\\n notNotApplicable \\r\\n | join (\\r\\n notApplicable\\r\\n ) on RecommendationId, AssessedResourceId \\r\\n | where TimeGenerated < TimeGenerated1 \\r\\n | summarize ToNotApplicableCount = count(), RecommendationName=any(RecommendationName) by RecommendationId;\\r\\n// Union \\r\\nunion notHealthyToHealthy, notUnhealthyToUnhealthy, notNotApplicableToNotApplicable\\r\\n| summarize RecommendationName=any(RecommendationName), ToUnhealthyCount = sum(ToUnhealthyCount), ToHealthyCount = sum(ToHealthyCount), ToNotApplicableCount = sum(ToNotApplicableCount) by RecommendationId\\r\\n| order by ToUnhealthyCount desc\",\"size\":0,\"noDataMessage\":\"No data available. 
Check your continuous export configuration for the selected workspaces.\",\"timeContextFromParameter\":\"TimeRange\",\"exportedParameters\":[{\"fieldName\":\"RecommendationId\",\"parameterName\":\"RecommendationId\"},{\"fieldName\":\"RecommendationName\",\"parameterName\":\"RecommendationName\",\"parameterType\":1}],\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"RecommendationId\",\"formatter\":5},{\"columnMatch\":\"RecommendationName\",\"formatter\":7,\"formatOptions\":{\"linkTarget\":\"Url\",\"bladeOpenContext\":{\"bladeName\":\"RecommendationsBlade\",\"extensionName\":\"Microsoft_Azure_Security\",\"bladeParameters\":[{\"name\":\"assessmentKey\",\"source\":\"column\",\"value\":\"RecommendationId\"}]},\"customColumnWidthSetting\":\"100ch\"}},{\"columnMatch\":\"UnhealthyCount\",\"formatter\":8,\"formatOptions\":{\"palette\":\"redBright\"}},{\"columnMatch\":\"HealthyCount\",\"formatter\":8,\"formatOptions\":{\"palette\":\"green\"}},{\"columnMatch\":\"NotApplicableCount\",\"formatter\":8,\"formatOptions\":{\"palette\":\"gray\"}},{\"columnMatch\":\"AssessedResourceId\",\"formatter\":13,\"formatOptions\":{\"linkTarget\":null,\"showIcon\":true}}],\"labelSettings\":[{\"columnId\":\"RecommendationName\",\"label\":\"Recommendation name\"},{\"columnId\":\"ToUnhealthyCount\",\"label\":\"To unhealthy\"},{\"columnId\":\"ToHealthyCount\",\"label\":\"To healthy\"},{\"columnId\":\"ToNotApplicableCount\",\"label\":\"To not applicable\"}]},\"sortBy\":[]},\"name\":\"RecommendationStatusChanges\"},{\"type\":1,\"content\":{\"json\":\"To view changes over time on a specific recommendation, please select any from the list above.\",\"style\":\"info\"},\"conditionalVisibility\":{\"parameterName\":\"RecommendationId\",\"comparison\":\"isEqualTo\"},\"name\":\"ChangeLogBanner\"},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"let unhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'Unhealthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId\\r\\n| project UnhealthyRecommendationId = RecommendationId, UnhealthyResourceId = AssessedResourceId, UnhealhyTime = TimeGenerated, tostring(SubscriptionId);\\r\\nlet notApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState == 'NotApplicable'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId\\r\\n| project NARecommendationId = RecommendationId, NAResourceId = AssessedResourceId, NATime = TimeGenerated, tostring(SubscriptionId);\\r\\nlet healthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == 
'{selectedSubscription}'\\r\\n| where RecommendationState == 'Healthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId\\r\\n| project HealthyRecommendationId = RecommendationId, HealthyResourceId = AssessedResourceId, HealhyTime = TimeGenerated, tostring(SubscriptionId);\\r\\nlet NotHealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Healthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet NotUnhealthy = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'Unhealthy'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet NotNotApplicable = SecurityRecommendation\\r\\n| extend SubscriptionId = iff(AssessedResourceId==\\\"N/A\\\", split(url_decode(RecommendationLink),'/')[9], split(AssessedResourceId, '/')[2])\\r\\n| where '{selectedSubscription}' == 'All' or SubscriptionId == '{selectedSubscription}'\\r\\n| where RecommendationState !~ 'NotApplicable'\\r\\n| where isnotempty(RecommendationId) and isnotempty(RecommendationName)\\r\\n| summarize arg_max(TimeGenerated, *) by RecommendationId, AssessedResourceId;\\r\\nlet 1_to_Healthy = \\r\\n NotHealthy\\r\\n | extend orignalState = RecommendationState\\r\\n | join healthy on $left.RecommendationId == $right.HealthyRecommendationId, $left.AssessedResourceId == $right.HealthyResourceId\\r\\n | where TimeGenerated < HealhyTime\\r\\n | extend update = \\\"To healthy\\\"\\r\\n | project RecommendationId, RecommendationName, Description, OriginalState = RecommendationState, update, TimeGenerated, RecommendationSeverity, tostring(SubscriptionId), AssessedResourceId, RecommendationLink;\\r\\n//1_to_Healthy\\r\\nlet 2_to_Unhealthy = \\r\\n NotUnhealthy\\r\\n | extend orignalState = RecommendationState\\r\\n | join unhealthy on $left.RecommendationId == $right.UnhealthyRecommendationId, $left.AssessedResourceId == $right.UnhealthyResourceId\\r\\n | where TimeGenerated < UnhealhyTime\\r\\n | extend update = \\\"To unhealthy\\\"\\r\\n | project RecommendationId, RecommendationName, Description, OriginalState = RecommendationState, update, TimeGenerated, RecommendationSeverity, tostring(SubscriptionId), AssessedResourceId, RecommendationLink;\\r\\n//2_to_Unhealthy\\r\\nlet 3_to_NotApplicable = \\r\\n NotNotApplicable\\r\\n | extend orignalState = RecommendationState\\r\\n | join notApplicable on $left.RecommendationId == $right.NARecommendationId, $left.AssessedResourceId == $right.NAResourceId\\r\\n | where TimeGenerated < NATime\\r\\n | extend update = \\\"To not applicable\\\"\\r\\n | extend NotApplicableReason = iff(isempty(NotApplicableReason), \\\"NA\\\", NotApplicableReason)\\r\\n | project RecommendationId, RecommendationName, Description, OriginalState = RecommendationState, update, TimeGenerated, 
RecommendationSeverity, tostring(SubscriptionId), AssessedResourceId, RecommendationLink, NotApplicableReason;\\r\\n// JOIN\\r\\nunion 1_to_Healthy, 2_to_Unhealthy, 3_to_NotApplicable\\r\\n| extend FullRecommendationLink = strcat(\\\"http://\\\",RecommendationLink)\\r\\n| extend AssessedResourceId = iff(AssessedResourceId==\\\"N/A\\\", extract(\\\".*onPremiseMachines/(.+)\\\",1, url_decode(RecommendationLink)), AssessedResourceId)\\r\\n| project-away RecommendationLink\\r\\n| where RecommendationId == '{RecommendationId}'\",\"size\":0,\"title\":\"Changes for \\\"{RecommendationName}\\\"\",\"noDataMessage\":\"No data available. Check your continuous export configuration for the selected workspaces.\",\"showExportToExcel\":true,\"exportToExcelOptions\":\"all\",\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"RecommendationId\",\"formatter\":5},{\"columnMatch\":\"RecommendationName\",\"formatter\":5},{\"columnMatch\":\"Description\",\"formatter\":5},{\"columnMatch\":\"SubscriptionId\",\"formatter\":15,\"formatOptions\":{\"linkTarget\":null,\"showIcon\":true}},{\"columnMatch\":\"NotApplicableReason\",\"formatter\":0,\"formatOptions\":{\"customColumnWidthSetting\":\"30ch\"}},{\"columnMatch\":\"FullRecommendationLink\",\"formatter\":7,\"formatOptions\":{\"linkTarget\":\"Url\",\"linkLabel\":\"View\",\"linkIsContextBlade\":false}}],\"rowLimit\":1000,\"hierarchySettings\":{\"treeType\":1,\"groupBy\":[\"update\"]},\"labelSettings\":[{\"columnId\":\"RecommendationName\",\"label\":\"Recommendation name\"},{\"columnId\":\"OriginalState\",\"label\":\"Original state\"},{\"columnId\":\"update\",\"label\":\"Updated state\"},{\"columnId\":\"TimeGenerated\",\"label\":\"Time of change\"},{\"columnId\":\"RecommendationSeverity\",\"label\":\"Severity\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription\"},{\"columnId\":\"AssessedResourceId\",\"label\":\"Resource\"},{\"columnId\":\"NotApplicableReason\",\"label\":\"Reason\"},{\"columnId\":\"FullRecommendationLink\",\"label\":\"View recommendation\"}]},\"sortBy\":[]},\"conditionalVisibility\":{\"parameterName\":\"RecommendationId\",\"comparison\":\"isNotEqualTo\"},\"name\":\"ChangeLogDetails\"}]},\"conditionalVisibilities\":[{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"},{\"parameterName\":\"ErrorHandle\",\"comparison\":\"isNotEqualTo\",\"value\":\"1\"}],\"name\":\"ChangeLog\"},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"title\":\"Machines not sending current heartbeats\",\"items\":[{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-15m)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 15 minutes\",\"noDataMessage\":\"No machines found not reporting for more than 15 minutes.\",\"timeContext\":{\"durationMs\":86400000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription 
ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 0\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-24h)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 24 hours\",\"noDataMessage\":\"No machines found not reporting for more than 24 hours.\",\"timeContext\":{\"durationMs\":172800000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 1\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-48h)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 48 hours\",\"noDataMessage\":\"No machines found not reporting for more than 48 hours.\",\"timeContext\":{\"durationMs\":604800000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 2\",\"styleSettings\":{\"showBorder\":true}},{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"Heartbeat\\n| join kind = leftanti (\\n Heartbeat\\n | where TimeGenerated > now(-7d)\\n) on SourceComputerId\\n| summarize arg_max(LatestTimestamp=TimeGenerated, SubscriptionId) by ResourceId\\n| order by LatestTimestamp\",\"size\":3,\"showAnalytics\":true,\"title\":\"Agent not reporting for more than 7 days\",\"noDataMessage\":\"No machines found not reporting for more than 7 days.\",\"timeContext\":{\"durationMs\":2592000000},\"queryType\":0,\"resourceType\":\"microsoft.operationalinsights/workspaces\",\"crossComponentResources\":[\"{workspaceName}\"],\"gridSettings\":{\"labelSettings\":[{\"columnId\":\"ResourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"LatestTimestamp\",\"label\":\"Latest Heartbeat\"},{\"columnId\":\"SubscriptionId\",\"label\":\"Subscription ID\"}]}},\"customWidth\":\"50\",\"name\":\"query - 3\",\"styleSettings\":{\"showBorder\":true}}]},\"conditionalVisibilities\":[{\"parameterName\":\"SelectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"NotReportingTab\"},{\"parameterName\":\"workspaceName\",\"comparison\":\"isNotEqualTo\"}],\"name\":\"MachinesNotReporting\",\"styleSettings\":{\"showBorder\":true}},{\"type\":12,\"content\":{\"version\":\"NotebookGroup/1.0\",\"groupType\":\"editable\",\"title\":\"Protection Status\",\"items\":[{\"type\":3,\"content\":{\"version\":\"KqlItem/1.0\",\"query\":\"securityresources\\n| where type =~ \\\"microsoft.security/assessments\\\" or type =~ \\\"microsoft.security/softwareInventories\\\"\\n| extend assessmentStatusCode = 
case(type =~ \\\"microsoft.security/assessments\\\", tostring(properties.status.code), \\\"\\\")\\n| extend severity = case(assessmentStatusCode =~ \\\"unhealthy\\\", tolower(tostring(properties.metadata.severity)), tolower(assessmentStatusCode))\\n| extend exemptionType = case(tolower(type) != \\\"microsoft.security/assessments\\\",\\\"N/A\\\", case(properties.status.cause =~ \\\"exempt\\\", \\\"Yes\\\", \\\"No\\\"))\\n| extend source = case(type =~ \\\"microsoft.security/assessments\\\", tostring(properties.resourceDetails.Source), \\\"\\\")\\n| extend resourceId = trim(\\\" \\\", tolower(tostring(case(source =~ \\\"azure\\\", properties.resourceDetails.Id,\\n source =~ \\\"aws\\\" and isnotempty(tostring(properties.resourceDetails.ConnectorId)), properties.resourceDetails.Id,\\n source =~ \\\"aws\\\", properties.resourceDetails.AzureResourceId,\\n source =~ \\\"gcp\\\", properties.resourceDetails.AzureResourceId,\\n type =~ \\\"microsoft.security/assessments\\\", extract(\\\"^(.+)/providers/Microsoft.Security/assessments/.+$\\\",1,id),extract(\\\"^(.+)/providers/Microsoft.Security/softwareInventories/.+$\\\",1,id)))))\\n| extend resourceName = iff(source =~ \\\"aws\\\" and isnotempty(tostring(properties.resourceDetails.ConnectorId)), tostring(properties.additionalData.ResourceName), extract(@\\\"(.+)/(.+)\\\", 2, resourceId))\\n| extend regexResourceId = extract_all(@\\\"/providers/([^/]+)(?:/([^/]+)/[^/]+(?:/([^/]+)/[^/]+)?)?/([^/]+)/[^/]+$\\\", resourceId)\\n| extend RegexResourceType = regexResourceId[0]\\n| extend mainType = RegexResourceType[1], extendedType = RegexResourceType[2], resourceType = RegexResourceType[3]\\n| extend providerName = RegexResourceType[0],\\n mainType = case(mainType !~ \\\"\\\", strcat(\\\"/\\\",mainType), \\\"\\\"),\\n extendedType = case(extendedType!~ \\\"\\\", strcat(\\\"/\\\",extendedType), \\\"\\\"),\\n resourceType = case(resourceType!~ \\\"\\\", strcat(\\\"/\\\",resourceType), \\\"\\\")\\n| extend array = split(resourceId, '/')\\n| extend typeFullPath = case(\\n array_length(array) == 3, 'subscription',\\n array_length(array) == 5, 'resourcegroups',\\n source =~ \\\"aws\\\" and isnotempty(tostring(properties.resourceDetails.ConnectorId)), tolower(strcat(providerName, mainType, \\\"/\\\", tostring(properties.additionalData.ResourceProvider), tostring(properties.additionalData.ResourceType))),\\n strcat(providerName, mainType, extendedType, resourceType))\\n| extend resourceType = case(typeFullPath =~ 'resourcegroups' or typeFullPath =~ 'subscription', typeFullPath, tolower(trim(\\\"/\\\", resourceType)))\\n| extend assessmentKey = case(type =~ \\\"microsoft.security/assessments\\\", tostring(name), \\\"\\\")\\n| extend softwareVendorName = case(type =~ \\\"microsoft.security/softwareInventories\\\", tostring(properties.vendor), \\\"\\\")\\n| extend softwareName = case(type =~ \\\"microsoft.security/softwareInventories\\\", tostring(properties.softwareName), \\\"\\\")\\n| extend softwareVersion = case(type =~ \\\"microsoft.security/softwareInventories\\\", tostring(properties.version), \\\"\\\")\\n| extend softwareNameIdentifier = case(type =~ \\\"microsoft.security/softwareInventories\\\", strcat(softwareVendorName, \\\",\\\", softwareName, \\\",\\\", softwareVersion), \\\"\\\")\\n| extend environment = case(type =~ \\\"microsoft.security/assessments\\\", properties.resourceDetails[\\\"Source\\\"], \\\"\\\")\\n| extend environment = case(environment =~ \\\"onpremise\\\", tolower(\\\"Non-Azure\\\"), tolower(environment))\\n| extend osTypeProperty = 
properties.additionalData[\\\"OS Type\\\"]\\n| extend osType = case(isnotempty(osTypeProperty), osTypeProperty, \\\"\\\")\\n| extend hasAgent = case(assessmentKey == \\\"d1db3318-01ff-16de-29eb-28b344515626\\\" or assessmentKey == \\\"45cfe080-ceb1-a91e-9743-71551ed24e94\\\" or assessmentKey == \\\"720a3e77-0b9a-4fa9-98b6-ddf0fd7e32c1\\\" or assessmentKey == \\\"27ac71b1-75c5-41c2-adc2-858f5db45b08\\\", assessmentStatusCode, \\\"\\\")\\n| extend hasAgent = case(assessmentKey == \\\"4ab6e3c5-74dd-8b35-9ab9-f61b30875b27\\\" or assessmentKey == \\\"181ac480-f7c4-544b-9865-11b8ffe87f47\\\" or assessmentKey == \\\"4fb67663-9ab9-475d-b026-8c544cced439\\\" , \\\"healthy\\\", hasAgent)\\n| extend workspaceAzureResourceId = case(hasAgent !~ \\\"\\\", properties.additionalData[\\\"Reporting workspace azure id\\\"], \\\"\\\")\\n| extend workspaceName = case(workspaceAzureResourceId !~ \\\"\\\", extract(@\\\"(.+)/(.+)\\\", 2, workspaceAzureResourceId), \\\"\\\")\\n| extend assessmentDisplayName = case(type =~ \\\"microsoft.security/assessments\\\", case(isnotempty(properties.displayName), properties.displayName, properties.metadata.displayName), \\\"\\\")\\n| extend assessmentIdentifier = case(type =~ \\\"microsoft.security/assessments\\\", strcat(assessmentKey, \\\",\\\" , assessmentDisplayName, \\\",\\\", severity), \\\"\\\")\\n| summarize assessmentsCount = count() , assessmentsIdentifier = make_list(assessmentIdentifier), softwareNamesIdentifier = make_list(softwareNameIdentifier), hasAgent = max(hasAgent), workspaceName = max(workspaceName), environment = max(environment), osType = max(osType), exemptionType = max(exemptionType) by resourceId, subscriptionId, resourceName, resourceType, typeFullPath, severity\\n| extend packAssessments = pack(severity, assessmentsCount)\\n| summarize assessmentsSummary = make_bag(packAssessments), assessmentsIdentifier = make_set(assessmentsIdentifier), softwareNamesIdentifier = make_set(softwareNamesIdentifier), hasAgent = max(hasAgent), workspaceName= max(workspaceName), environment = max(environment), osType= max(osType), exemptionType = max(exemptionType) by resourceId, subscriptionId, resourceName, resourceType, typeFullPath\\n| extend agentMonitoring = case(hasAgent =~ \\\"NotApplicable\\\" or hasAgent =~ \\\"\\\", '',\\n hasAgent =~ \\\"Unhealthy\\\", \\\"notInstalled\\\",\\n \\\"installed\\\")\\n| join kind=leftouter (\\n securityresources\\n | where type =~ \\\"microsoft.security/pricings\\\"\\n | project subscriptionId, bundleName = tolower(name), freeTrialRemainingTime = properties.freeTrialRemainingTime, pricingTier = tolower(properties.pricingTier)\\n | extend bundlesPricing = pack(bundleName, pricingTier)\\n | summarize subscriptionPricing = make_bag(bundlesPricing) by subscriptionId\\n ) on subscriptionId\\n| extend hasNoSoftwareData = case(array_length(softwareNamesIdentifier) == 1, case(set_has_element(softwareNamesIdentifier, \\\"\\\"), true, false), false)\\n| extend softwareNamesIdentifier = case(hasNoSoftwareData, softwareNamesIdentifier, set_difference(softwareNamesIdentifier, pack_array(\\\"\\\")))\\n| extend AssessmentsHigh = case(isnull(assessmentsSummary.high), 0 , toint(assessmentsSummary.high))\\n| extend AssessmentsMedium = case(isnull(assessmentsSummary.medium), 0 , toint(assessmentsSummary.medium))\\n| extend AssessmentsLow = case(isnull(assessmentsSummary.low), 0 , toint(assessmentsSummary.low))\\n| extend unhealthyAssessmentsCount = AssessmentsHigh + AssessmentsMedium + AssessmentsLow\\n| extend virtualmachines = 
case(isnull(subscriptionPricing), '' , subscriptionPricing.virtualmachines)\\n| extend virtualmachines = case(virtualmachines == 'free', 'off', 'on')\\n| extend sqlservers = case(isnull(subscriptionPricing), '' , subscriptionPricing.sqlservers)\\n| extend sqlservers = case(sqlservers == 'free', 'off', 'on')\\n| extend kubernetesservice = case(isnull(subscriptionPricing), '' , subscriptionPricing.kubernetesservice)\\n| extend kubernetesservice = case(kubernetesservice == 'free', 'off', 'on')\\n| extend containerregistry = case(isnull(subscriptionPricing), '' , subscriptionPricing.containerregistry)\\n| extend containerregistry = case(containerregistry == 'free', 'off', 'on')\\n| extend connectedcontainerregistry = case(isnull(subscriptionPricing), '' , subscriptionPricing.connectedcontainerregistry)\\n| extend connectedcontainerregistry = case(connectedcontainerregistry == 'free', 'off', 'on')\\n| extend sqlservervirtualmachines = case(isnull(subscriptionPricing), '' , subscriptionPricing.sqlservervirtualmachines)\\n| extend sqlservervirtualmachines = case(sqlservervirtualmachines == 'free', 'off', 'on')\\n| extend appservices = case(isnull(subscriptionPricing), '' , subscriptionPricing.appservices)\\n| extend appservices = case(appservices == 'free', 'off', 'on')\\n| extend storageaccounts = case(isnull(subscriptionPricing), '' , subscriptionPricing.storageaccounts)\\n| extend storageaccounts = case(storageaccounts == 'free', 'off', 'on')\\n| extend keyvaults = case(isnull(subscriptionPricing), '' , subscriptionPricing.keyvaults)\\n| extend keyvaults = case(keyvaults == 'free', 'off', 'on')\\n| extend opensourcerelationaldatabases = case(isnull(subscriptionPricing), '' , subscriptionPricing.opensourcerelationaldatabases)\\n| extend opensourcerelationaldatabases = case(opensourcerelationaldatabases == 'free', 'off', 'on')\\n| extend calculatedSubscriptionPricing = case(resourceType =~ \\\"subscription\\\" and isempty(subscriptionPricing) == false , iff(subscriptionPricing has \\\"free\\\" and subscriptionPricing has \\\"standard\\\", \\\"partial\\\", iff(subscriptionPricing has \\\"free\\\", \\\"off\\\", \\\"on\\\")), \\\"\\\")\\n| extend resourcePricing = case(typeFullPath =~ \\\"microsoft.classiccompute/virtualmachines\\\", virtualmachines, typeFullPath =~ \\\"microsoft.compute/virtualmachines\\\", virtualmachines, typeFullPath =~ \\\"microsoft.hybridcompute/machines\\\", virtualmachines, typeFullPath =~ \\\"microsoft.sql/servers\\\", sqlservers, typeFullPath =~ \\\"microsoft.containerservice/managedclusters\\\", kubernetesservice, typeFullPath =~ \\\"microsoft.kubernetes/connectedclusters\\\", kubernetesservice, typeFullPath =~ \\\"microsoft.containerregistry/registries\\\", containerregistry, typeFullPath =~ \\\"microsoft.security/connectedcontainerregistries\\\", connectedcontainerregistry, typeFullPath =~ \\\"microsoft.sqlvirtualmachine/sqlvirtualmachines\\\", sqlservervirtualmachines, typeFullPath =~ \\\"microsoft.web/sites\\\", appservices, typeFullPath =~ \\\"microsoft.storage/storageaccounts\\\", storageaccounts, typeFullPath =~ \\\"microsoft.compute/virtualmachinescalesets\\\", virtualmachines, typeFullPath =~ \\\"microsoft.keyvault/vaults\\\", keyvaults, typeFullPath =~ \\\"microsoft.dbforpostgresql/servers\\\", opensourcerelationaldatabases, typeFullPath =~ \\\"microsoft.dbformysql/servers\\\", opensourcerelationaldatabases, typeFullPath =~ \\\"microsoft.dbformariadb/servers\\\", opensourcerelationaldatabases, calculatedSubscriptionPricing)\\n| extend pricing = 
case(resourceType =~ \\\"subscription\\\" , calculatedSubscriptionPricing , resourcePricing)\\n| extend selectedSoftware = \\\"\\\"\\n| project resourceType, exemptionType, typeFullPath, resourceId, resourceName, subscriptionId, environment, osType, workspaceName, agentMonitoring, assessmentsIdentifier, assessmentsSummary, subscriptionPricing, unhealthyAssessmentsCount, pricing, softwareNamesIdentifier, selectedSoftware\\n| extend resourceGroup = tolower(tostring(split(resourceId, \\\"/\\\")[4]))\\n| order by unhealthyAssessmentsCount, subscriptionId, resourceType, resourceId\\n| where typeFullPath in ('microsoft.compute/virtualmachines', 'microsoft.hybridcompute/machines')\\n| where isnotempty(resourceId)\",\"size\":3,\"showAnalytics\":true,\"queryType\":1,\"resourceType\":\"microsoft.resourcegraph/resources\",\"crossComponentResources\":[\"value::all\"],\"visualization\":\"table\",\"showExpandCollapseGrid\":true,\"gridSettings\":{\"formatters\":[{\"columnMatch\":\"resourceType\",\"formatter\":5},{\"columnMatch\":\"exemptionType\",\"formatter\":5},{\"columnMatch\":\"typeFullPath\",\"formatter\":5},{\"columnMatch\":\"resourceName\",\"formatter\":5},{\"columnMatch\":\"agentMonitoring\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"installed\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"Unhealthy\",\"representation\":\"2\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"NotApplicable\",\"representation\":\"cancelled\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"notInstalled\",\"representation\":\"disabled\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"Disable\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"assessmentsIdentifier\",\"formatter\":5},{\"columnMatch\":\"assessmentsSummary\",\"formatter\":5},{\"columnMatch\":\"subscriptionPricing\",\"formatter\":5},{\"columnMatch\":\"unhealthyAssessmentsCount\",\"formatter\":3,\"formatOptions\":{\"min\":0,\"max\":15,\"palette\":\"greenRed\"}},{\"columnMatch\":\"pricing\",\"formatter\":18,\"formatOptions\":{\"thresholdsOptions\":\"icons\",\"thresholdsGrid\":[{\"operator\":\"==\",\"thresholdValue\":\"on\",\"representation\":\"success\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"text\":\"{0}{1}\"},{\"operator\":\"==\",\"thresholdValue\":\"off\",\"representation\":\"disabled\",\"text\":\"{0}{1}\"},{\"operator\":\"Default\",\"thresholdValue\":null,\"representation\":\"disabled\",\"text\":\"{0}{1}\"}]}},{\"columnMatch\":\"softwareNamesIdentifier\",\"formatter\":5},{\"columnMatch\":\"selectedSoftware\",\"formatter\":5},{\"columnMatch\":\"resourceGroup\",\"formatter\":5}],\"rowLimit\":1000,\"filter\":true,\"sortBy\":[{\"itemKey\":\"$gen_link_resourceId_3\",\"sortOrder\":2}],\"labelSettings\":[{\"columnId\":\"exemptionType\",\"label\":\"Resource Exemption exists\"},{\"columnId\":\"resourceId\",\"label\":\"Resource ID\"},{\"columnId\":\"subscriptionId\",\"label\":\"Subscription ID\"},{\"columnId\":\"environment\",\"label\":\"Environment\"},{\"columnId\":\"osType\",\"label\":\"OS Type\"},{\"columnId\":\"workspaceName\",\"label\":\"Workspace Name\"},{\"columnId\":\"agentMonitoring\",\"label\":\"Log Analytics agent status\"},{\"columnId\":\"unhealthyAssessmentsCount\",\"label\":\"Open Recommendations\"},{\"columnId\":\"pricing\",\"label\":\"Azure Defender 
status\"}]},\"sortBy\":[{\"itemKey\":\"$gen_link_resourceId_3\",\"sortOrder\":2}]},\"name\":\"query - 0\",\"styleSettings\":{\"showBorder\":true}}]},\"conditionalVisibility\":{\"parameterName\":\"SelectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"SecurityTab\"},\"name\":\"protectionStatus\",\"styleSettings\":{\"showBorder\":true}}]},\"conditionalVisibility\":{\"parameterName\":\"selectedTab\",\"comparison\":\"isEqualTo\",\"value\":\"Security\"},\"name\":\"Security\"}],\"isLocked\":false,\"fallbackResourceIds\":[\"workbookresourceid-stage\"]}",
"version": "1.0",
"sourceId": "[parameters('workbookResourceId')]",
"category": "[parameters('workbookType')]"
diff --git a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDevOps.json b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDevOps.json
index 6a14f5d8af..89745f3ee8 100644
--- a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDevOps.json
+++ b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookDevOps.json
@@ -1400,7 +1400,7 @@
{
"type": 1,
"content": {
- "json": "💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\r\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\r\n2. Select the specific Azure subscription for which you want to configure the data export.\r\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\r\n4. Set the export target to **Log Analytics workspace**.\r\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\r\n6. From the export frequency options, select **Streaming** and **Snapshots**.\r\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. Select Save.\r\n\r\n[Learn more](https://docs.microsoft.com/en-us/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\r\n\r\n> **Notes**\r\n* To get full visibility, wait at least one week for the first snapshot to be exported.\r\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://docs.microsoft.com/en-us/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export)."
+ "json": "💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\r\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\r\n2. Select the specific Azure subscription for which you want to configure the data export.\r\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\r\n4. Set the export target to **Log Analytics workspace**.\r\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\r\n6. From the export frequency options, select **Streaming** and **Snapshots**.\r\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. Select Save.\r\n\r\n[Learn more](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\r\n\r\n> **Notes**\r\n* To get full visibility, wait at least one week for the first snapshot to be exported.\r\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export)."
},
"showPin": false,
"name": "Instructions"
diff --git a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookFull.json b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookFull.json
index ea0fed94ff..1de21157ea 100644
--- a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookFull.json
+++ b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookFull.json
@@ -1607,7 +1607,7 @@
{
"type": 1,
"content": {
- "json": "💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\r\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\r\n2. Select the specific Azure subscription for which you want to configure the data export.\r\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\r\n4. Set the export target to **Log Analytics workspace**.\r\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\r\n6. From the export frequency options, select **Streaming** and **Snapshots**.\r\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. Select Save.\r\n\r\n[Learn more](https://docs.microsoft.com/en-us/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\r\n\r\n> **Notes**\r\n* To get full visibility, wait at least one week for the first snapshot to be exported.\r\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://docs.microsoft.com/en-us/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export)."
+ "json": "💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\r\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\r\n2. Select the specific Azure subscription for which you want to configure the data export.\r\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\r\n4. Set the export target to **Log Analytics workspace**.\r\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\r\n6. From the export frequency options, select **Streaming** and **Snapshots**.\r\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. Select Save.\r\n\r\n[Learn more](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\r\n\r\n> **Notes**\r\n* To get full visibility, wait at least one week for the first snapshot to be exported.\r\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export)."
},
"showPin": false,
"name": "Instructions"
diff --git a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookITPro.json b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookITPro.json
index f7cafe50df..2beb6c41c6 100644
--- a/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookITPro.json
+++ b/azure_jumpstart_arcbox/artifacts/mgmtMonitorWorkbookITPro.json
@@ -750,7 +750,7 @@
{
"type": 1,
"content": {
- "json": "💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\r\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\r\n2. Select the specific Azure subscription for which you want to configure the data export.\r\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\r\n4. Set the export target to **Log Analytics workspace**.\r\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\r\n6. From the export frequency options, select **Streaming** and **Snapshots**.\r\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. Select Save.\r\n\r\n[Learn more](https://docs.microsoft.com/en-us/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\r\n\r\n> **Notes**\r\n* To get full visibility, wait at least one week for the first snapshot to be exported.\r\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://docs.microsoft.com/en-us/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export)."
+ "json": "💡 To use this workbook, you'll need to configure **continuous export** to export data to a Log Analytics workspace:\r\n1. From Microsoft Defender for Cloud's sidebar, select **Environment Settings**.\r\n2. Select the specific Azure subscription for which you want to configure the data export.\r\n3. From the sidebar of the settings page for that subscription, select **Continuous Export**.\r\n4. Set the export target to **Log Analytics workspace**.\r\n5. Select the following data types: **Security recommendations** and **Secure Score (Preview)**.\r\n6. From the export frequency options, select **Streaming** and **Snapshots**.\r\n7. Make sure to select ArcBox's subscription, resource group and Log Analytics workspace as the export target. Select Save.\r\n\r\n[Learn more](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-portal#set-up-a-continuous-export)\r\n\r\n> **Notes**\r\n* To get full visibility, wait at least one week for the first snapshot to be exported.\r\n* To configure continuous export across your organization, use the supplied Azure Policy 'DeployIfNotExist' policies described [here](https://learn.microsoft.com/azure/security-center/continuous-export?tabs=azure-policy#set-up-a-continuous-export)."
},
"showPin": false,
"name": "Instructions"
diff --git a/azure_jumpstart_hcibox/artifacts/SDN/CertHelpers.ps1 b/azure_jumpstart_hcibox/artifacts/SDN/CertHelpers.ps1
index 5e89d5b776..94e0acb7cb 100644
--- a/azure_jumpstart_hcibox/artifacts/SDN/CertHelpers.ps1
+++ b/azure_jumpstart_hcibox/artifacts/SDN/CertHelpers.ps1
@@ -1,5 +1,5 @@
# --------------------------------------------------------------
-# Copyright Microsoft Corporation. All Rights Reserved.
+# Copyright © Microsoft Corporation. All Rights Reserved.
# Microsoft Corporation (or based on where you live, one of its affiliates) licenses this sample code for your internal testing purposes only.
# Microsoft provides the following sample code AS IS without warranty of any kind. The sample code is not supported under any Microsoft standard support program or services.
# Microsoft further disclaims all implied warranties including, without limitation, any implied warranties of merchantability or of fitness for a particular purpose.
@@ -118,7 +118,7 @@ Function AddCertToLocalMachineStore($certFullPath, $storeName, $securePassword)
}
else
{
- # https://msdn.microsoft.com/en-us/library/system.security.cryptography.x509certificates.x509keystorageflags(v=vs.110).aspx
+ # https://msdn.microsoft.com/library/system.security.cryptography.x509certificates.x509keystorageflags(v=vs.110).aspx
$certificate.import($certFullPath, $securePassword, "MachineKeySet,PersistKeySet")
}
diff --git a/azure_jumpstart_hcibox/artifacts/SDN/SDNExpressUI.psm1 b/azure_jumpstart_hcibox/artifacts/SDN/SDNExpressUI.psm1
index 9c9fe02336..849f094176 100644
--- a/azure_jumpstart_hcibox/artifacts/SDN/SDNExpressUI.psm1
+++ b/azure_jumpstart_hcibox/artifacts/SDN/SDNExpressUI.psm1
@@ -420,7 +420,7 @@ function SDNExpressUI {
- Plan SDN topic on docs.microsoft.com.
+ Plan SDN topic on docs.microsoft.com.
diff --git a/docs/FAQ/_index.md b/docs/FAQ/_index.md
deleted file mode 100644
index 0cfa99e92d..0000000000
--- a/docs/FAQ/_index.md
+++ /dev/null
@@ -1,87 +0,0 @@
----
-type: docs
-title: "Jumpstart FAQ"
-linkTitle: "Jumpstart FAQ"
-weight: 8
----
-
-# Jumpstart Frequently Asked Questions (FAQ)
-
-## General
-
-### Can I contribute to the Jumpstart?
-
-Absolutely! The Jumpstart is a community-driven open-source project, and all contributions are welcomed. To get started, review the [Jumpstart Scenario Write-up Guidelines](https://azurearcjumpstart.io/scenario_guidelines/) and our [Code of Conduct](https://azurearcjumpstart.io/code_of_conduct/).
-
-## Jumpstart ArcBox
-
-### What are the use cases for ArcBox?
-
-ArcBox is a virtual hybrid sandbox that can be used to explore Azure Arc capabilities, build quick demo environments, support proof-of-concept projects, and even provide a testing platform for specific hybrid scenarios. Many partners and customers use ArcBox to quickly get hands-on with Azure Arc technology because it's quick to deploy with minimal requirements.
-
-### What is required to deploy ArcBox?
-
-ArcBox deployment requires an Azure service principal with Contributor or Owner role-based access control (RBAC) on an Azure subscription and resource group. You can deploy ArcBox using the Azure portal, Az CLI, Bicep, or Terraform. The service principal is required to run the automation scripts that deploy and configure ArcBox features. You can view how the service principal is used by exploring the ArcBox code on our [public GitHub repository](https://github.com/microsoft/azure_arc).
-
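- A minimal sketch of creating such a service principal with the Azure CLI (the display name "JumpstartArcBox" is a placeholder; scope it to your own subscription or resource group):
-
- ```shell
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcBox" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-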
-### What Azure regions can ArcBox be deployed to?
-
-ArcBox can be deployed to the following regions:
-
-- East US
-- East US 2
-- Central US
-- West US 2
-- North Europe
-- West Europe
-- France Central
-- UK South
-- Australia East
-- Japan East
-- Korea Central
-- Southeast Asia
-
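- If you are unsure of the exact location name to pass to a deployment, the Azure CLI can list them (a quick sketch):
-
- ```shell
- az account list-locations --query "[].name" --output table
- ```
-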
-### What are the different "flavors" of ArcBox?
-
-ArcBox offers four different configurations, or "flavors", that allow the user to choose their own experience.
-
-- [ArcBox "Full"](https://azurearcjumpstart.io/azure_jumpstart_arcbox/Full) - The core ArcBox experience with Azure Arc-enabled servers, Kubernetes, and data services capabilities.
-- [ArcBox for IT Pros](https://azurearcjumpstart.io/azure_jumpstart_arcbox/ITPro) - This essential Azure Arc-enabled servers sandbox includes a mix of Microsoft Windows and Linux servers managed using capabilities such as Azure Monitor, Microsoft Defender for Cloud, Azure Policy, Update Management and more.
-- [ArcBox for DevOps](https://azurearcjumpstart.io/azure_jumpstart_arcbox/DevOps) - This essential Azure Arc-enabled Kubernetes sandbox includes capabilities such as GitOps, Open Service Mesh (OSM), secrets management, monitoring, and more.
-- [ArcBox for DataOps](https://azurearcjumpstart.io/azure_jumpstart_arcbox/DataOps) - This essential Azure Arc-enabled SQL Managed Instance sandbox includes capabilities such as AD authentication, disaster recovery, point-in-time restore, migration, and more.
-
-### What are the costs of using ArcBox?
-
-ArcBox incurs normal Azure consumption charges for various Azure resources, such as virtual machines and storage. Each flavor of ArcBox uses a different combination of Azure resources and therefore, costs vary depending on the flavor used. You can view example estimates of ArcBox costs per flavor by clicking the links below.
-
-- [ArcBox Full cost estimate](https://aka.ms/ArcBoxFullCost)
-- [ArcBox for ITPro cost estimate](https://aka.ms/ArcBoxITProCost)
-- [ArcBox for DevOps cost estimate](https://aka.ms/ArcBoxDevOpsCost)
-- [ArcBox for DataOps cost estimate](https://aka.ms/ArcBoxDataOpsCost)
-
-### Where can I go if I have trouble deploying or using ArcBox?
-
-Each ArcBox flavor has a troubleshooting section of its documentation that you can review for common issues:
-
-- [Troubleshooting ArcBox Full](https://azurearcjumpstart.io/azure_jumpstart_arcbox/full/#basic-troubleshooting)
-- [Troubleshooting ArcBox for IT Pros](https://azurearcjumpstart.io/azure_jumpstart_arcbox/ITPro/#basic-troubleshooting)
-- [Troubleshooting ArcBox for DevOps](https://azurearcjumpstart.io/azure_jumpstart_arcbox/DevOps/#basic-troubleshooting)
-- [Troubleshooting ArcBox for DataOps](https://azurearcjumpstart.io/azure_jumpstart_arcbox/DataOps/#basic-troubleshooting)
-
-If you're still stuck, please [submit an issue](https://github.com/microsoft/azure_arc/issues/new/choose) on our GitHub repository and the Jumpstart team will try to assist you.
-
-## Jumpstart HCIBox
-
-### What Azure regions can HCIBox be deployed to?
-
-HCIBox can be deployed to the following regions:
-
-- East US
-- East US 2
-- West US 2
-- North Europe
-
-### What are the costs of using HCIBox?
-
-HCIBox incurs normal Azure consumption charges for various Azure resources such as virtual machines and storage. You can view an example estimate of HCIBox costs by clicking the link below.
-
-- [HCIBox cost estimate](https://aka.ms/HCIBoxCost)
diff --git a/docs/_index.md b/docs/_index.md
deleted file mode 100755
index b02db4d12b..0000000000
--- a/docs/_index.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-type: docs
----
-
-
-
-#
diff --git a/docs/azure_arc_jumpstart/_index.md b/docs/azure_arc_jumpstart/_index.md
deleted file mode 100644
index 9218df8428..0000000000
--- a/docs/azure_arc_jumpstart/_index.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-type: docs
-title: "Azure Arc Jumpstart Scenarios"
-linkTitle: "Jumpstart Scenarios"
-weight: 2
----
\ No newline at end of file
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/_index.md
deleted file mode 100644
index b0234b4895..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Arc-enabled app services"
-linkTitle: "Azure Arc-enabled app services"
-weight: 6
-description: >-
- The deployment scenarios in this section will guide you through deploying and working with Azure Arc-enabled app services on multiple infrastructure platforms.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/_index.md
deleted file mode 100644
index b3119375f0..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Kubernetes Service"
-linkTitle: "Azure Kubernetes Service"
-weight: 1
-description: >-
- If you do not yet have a Kubernetes cluster, the scenarios in this section will guide you through creating an AKS cluster with Azure Arc-enabled app services integration in an automated fashion using ARM templates.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/_index.md
deleted file mode 100644
index bb84420a99..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/_index.md
+++ /dev/null
@@ -1,280 +0,0 @@
----
-type: docs
-title: "App Service (Container) ARM Template"
-linkTitle: "App Service (Container) ARM Template"
-weight: 2
-description: >
----
-
-## Deploy an App Service app using custom container on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled app services](https://docs.microsoft.com/azure/app-service/overview-arc-integration) deployed on an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an App Service plan, a sample Web Application (Web App) and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled app services.
-
-> **NOTE: Currently, Azure Arc-enabled app services is in preview.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
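- For example, a service principal scoped to a single resource group (the names below are placeholders) could be created with:
-
- ```shell
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" \
-   --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-AppSvc-Demo
- ```
-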
-## Automation Flow
-
-To get familiar with the automation and deployment flow, review the explanation below.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc app services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled app services logs uploads.
-
-- User remotes into client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell script that configures the Azure Arc-enabled app services Kubernetes environment on the AKS cluster.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**true**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**false**_.
- - _`deployAPIMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
- - _`adminEmail`_ - an email address that will be used on the Azure API Management deployment to receive all system notifications.
- - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_app_services_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name <Name of the Azure resource group> --location <Azure Region>
- az deployment group create \
- --resource-group <Name of the Azure resource group> \
- --name <The name of this deployment> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json \
- --parameters <The azuredeploy.parameters.json parameters file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-AppSvc-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AppSvc-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~10-20min**
-
- > **NOTE: Since Azure Arc-enabled app services is [currently in preview](https://docs.microsoft.com/azure/app-service/overview-arc-integration#public-preview-limitations), deployment regions availability is limited to East US and West Europe.**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
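- Before re-running in another region, you can check where a given VM size is currently offered (a sketch; the size and region are illustrative):
-
- ```shell
- az vm list-skus --location eastus --size Standard_D8s_v3 --output table
- ```
-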
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal.
-
- ![Screenshot showing the ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
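- If you prefer the CLI over the portal, an equivalent rule could be created with a sketch like the following (the rule name, priority, and your source IP are placeholders; the resource group name matches the example used earlier):
-
- ```shell
- az network nsg rule create \
-   --resource-group Arc-AppSvc-Demo \
-   --nsg-name Arc-App-Client-NSG \
-   --name AllowRDP \
-   --priority 1000 \
-   --direction Inbound \
-   --access Allow \
-   --protocol Tcp \
-   --source-address-prefixes <your-public-ip> \
-   --destination-port-ranges 3389
- ```
-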
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; it will be closed for you once completed. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure web application will be deployed on the cluster and ready to use.
-
- > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will restart and go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![Screenshot showing PowerShell logon script run](./11.png)
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
- ![Screenshot showing PowerShell logon script run](./22.png)
-
- ![Screenshot showing PowerShell logon script run](./23.png)
-
- ![Screenshot showing PowerShell logon script run](./24.png)
-
- Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the app service plan and the sample web application deployed on the cluster will be ready.
-
- ![Screenshot showing desktop wallpaper change](./25.png)
-
-- Since this scenario is deploying both the app service plan and a sample web application, you will also notice additional, newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled app services are using this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Functions also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for a web app to run.
-
- - [**App Service**](https://docs.microsoft.com/azure/app-service/overview) - Azure App Service is an HTTP-based service for hosting web applications, REST APIs, and mobile back ends.
-
- ![Screenshot showing additional Azure resources in the resource group](26.png)
-
-- In this scenario, **a Docker, custom container Linux-based** sample Jumpstart web application was deployed. To open the deployed web application in your web browser, simply click the App Service resource and then the created URL or the Browse button.
-
- ![Screenshot showing App Service resource in a resource group](./27.png)
-
- ![Screenshot showing the web application URL](./28.png)
-
- ![Screenshot showing the web application open in a web browser](./29.png)
-
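- As a quick cross-check from the CLI, the deployed web app and its URL can be listed (a sketch; the resource group name matches the example used earlier):
-
- ```shell
- az webapp list --resource-group Arc-AppSvc-Demo --query "[].{name:name, host:defaultHostName}" --output table
- ```
-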
-## Cluster extensions
-
-In this scenario, the Azure Arc-enabled app services cluster extension was deployed and used to deploy the app services infrastructure.
-
-- To view cluster extensions, open the Extensions settings of the Azure Arc-enabled Kubernetes resource.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes resource](./30.png)
-
- ![Screenshot showing Azure Arc-enabled Kubernetes cluster extensions settings](./31.png)
-
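- The same extension information is available from the CLI via the `k8s-extension` extension (the cluster name is a placeholder):
-
- ```shell
- az extension add --name k8s-extension
- az k8s-extension list --resource-group Arc-AppSvc-Demo --cluster-name <cluster-name> --cluster-type connectedClusters --output table
- ```
-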
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing the Delete Azure resource group button](./32.png)
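-
- Alternatively, the resource group can be deleted with the Azure CLI:
-
- ```shell
- az group delete --name Arc-AppSvc-Demo --yes --no-wait
- ```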
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_apimgmt_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_apimgmt_arm_template/_index.md
deleted file mode 100644
index 541769e3a3..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_apimgmt_arm_template/_index.md
+++ /dev/null
@@ -1,313 +0,0 @@
----
-type: docs
-title: "Azure API Management Gateway ARM Template"
-linkTitle: "Azure API Management Gateway ARM Template"
-weight: 5
-description: >
----
-
-## Deploy an Azure API Management gateway on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [a self-hosted Azure API Management Gateway](https://docs.microsoft.com/azure/api-management/how-to-deploy-self-hosted-gateway-azure-arc) deployed on an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster that has been onboarded as an [Azure Arc-enabled Kubernetes cluster](https://docs.microsoft.com/azure/azure-arc/kubernetes/overview) using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an Azure API Management gateway, a backend API and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with the Azure API Management gateway.
-
-> **NOTE: Currently, API Management self-hosted gateway on Azure Arc is in preview. The deployment time for this scenario can take ~60-90 minutes**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, review the explanation below.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure API Management logs uploads.
-
-- User remotes into client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/artifacts/AppServicesLogonScript.ps1) PowerShell script that deploys the AKS cluster and configures the Azure Arc-enabled app services Kubernetes environment on it.
-
- > **NOTE: Notice the AKS cluster will be deployed via the PowerShell script automation.**
-
-## Deployment
-
-> **NOTE: The deployment time for this scenario can take ~60-90 minutes**
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`kubernetesVersion`_ - AKS version
- - _`dnsPrefix`_ - AKS unique DNS prefix
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**false**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
- - _`deployAPIMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**true**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
- - _`adminEmail`_ - an email address that will be used on the Azure API Management deployment to receive all system notifications.
- - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_app_services_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name <Name of the Azure resource group> --location <Azure Region>
- az deployment group create \
- --resource-group <Name of the Azure resource group> \
- --name <The name of this deployment> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.json \
- --parameters <The azuredeploy.parameters.json parameters file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-API-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-API-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **7 Azure resources** deployed.
-
- ![ARM template deployment completed](./01.png)
-
- ![New Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_apimgmt_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_apimgmt_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_apimgmt_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; it will be closed for you once completed. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure web application will be deployed on the cluster and be ready to use.
-
- > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will restart and go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![PowerShell logon script run](./11.png)
-
- ![PowerShell logon script run](./12.png)
-
- ![PowerShell logon script run](./13.png)
-
- ![PowerShell logon script run](./14.png)
-
- ![PowerShell logon script run](./15.png)
-
- ![PowerShell logon script run](./16.png)
-
- Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the API Management gateway and the sample API will be configured on the cluster.
-
- ![Wallpaper change](./17.png)
-
-- There will be newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled app services are using this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Functions also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for a web app to run.
-
- - [**App Service**](https://docs.microsoft.com/azure/app-service/overview) - Azure App Service is an HTTP-based service for hosting web applications, REST APIs, and mobile back ends.
-
- ![Additional Azure resources in the resource group](./18.png)
-
-## API Management self-hosted gateway
-
-In this scenario, the Azure Arc-enabled API Management cluster extension was deployed and used to deploy the self-hosted API Management gateway services infrastructure.
-
-- To view cluster extensions, open the Extensions settings of the Azure Arc-enabled Kubernetes resource.
-
- ![Azure Arc-enabled Kubernetes resource](./19.png)
-
- ![Azure Arc-enabled Kubernetes cluster extensions settings](./20.png)
-
-Deploying the API Management gateway extension to an Azure Arc-enabled Kubernetes cluster creates an Azure API Management self-hosted gateway. You can verify this from the portal by going to the Resource Group and selecting the API management service.
-
- ![API management service](./21.png)
-
-Select Gateways under the Deployment + infrastructure section.
-
- ![Self-hosted Gateway](./22.png)
-
-A self-hosted gateway should be deployed with one connected node.
-
- ![Connected node on self-hosted gateway](./23.png)
-
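- You can also confirm the gateway pods are running directly on the cluster from the client VM (the `apimgmt` namespace matches the one used later in this scenario):
-
- ```shell
- kubectl get pods -n apimgmt
- ```
-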
-In this scenario, a sample Demo conference API was deployed. To view the deployed API, simply click on the self-hosted gateway resource and select APIs.
-
- ![Demo Conference API](./24.png)
-
-To demonstrate that the self-hosted gateway is processing API requests, you need to identify two elements:
-
-- The public IP address of the self-hosted gateway, obtained by running the command below from the client VM.
-
- ```powershell
- kubectl get svc -n apimgmt
- ```
-
- ![Self-hosted gateway public IP](./25.png)
-
-- The API Management subscription key. In the Azure portal, open the API Management service resource, select Subscriptions under APIs, and select Show/hide keys for the subscription with display name "Built-in all-access subscription".
-
- ![Self-hosted gateway subscriptions](./26.png)
-
- ![Subscription key](./27.png)
-
-Once you have obtained these two parameters, replace them in the following code snippet and run it from the client VM PowerShell.
-
- ```powershell
-   $publicip = ""     # Replace with the self-hosted gateway public IP address
-   $subscription = "" # Replace with the API Management subscription key
-
- $url = "http://$($publicip):5000/conference/topics"
- $headers = @{
- 'Ocp-Apim-Subscription-Key' = $subscription
- 'Ocp-Apim-Trace' = 'true'
- }
- $i=1
- While ($i -le 10)
- {
- Invoke-RestMethod -URI $url -Headers $headers
- $i++
- }
- ```
-
- ![API calls test](./28.png)
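-
-The same test can be issued with curl from any machine that can reach the gateway; a sketch reusing the endpoint, port, and headers from the PowerShell snippet above (the two placeholder values are yours to fill in):
-
-   ```shell
-   PUBLIC_IP="<gateway-public-IP>"
-   SUBSCRIPTION_KEY="<subscription-key>"
-
-   # Send ten requests through the self-hosted gateway, mirroring the PowerShell loop
-   for i in $(seq 1 10); do
-     curl -s -H "Ocp-Apim-Subscription-Key: ${SUBSCRIPTION_KEY}" \
-          -H "Ocp-Apim-Trace: true" \
-          "http://${PUBLIC_IP}:5000/conference/topics"
-   done
-   ```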
-
-In the Overview page of the API Management service, you can now see the API requests processed by the self-hosted gateway.
-
- ![API requests metrics](./29.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Delete Azure resource group](./30.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_function_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_function_arm_template/_index.md
deleted file mode 100644
index 4bd388178a..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_function_arm_template/_index.md
+++ /dev/null
@@ -1,316 +0,0 @@
----
-type: docs
-title: "Azure Function ARM Template"
-linkTitle: "Azure Function ARM Template"
-weight: 3
-description: >
----
-
-## Deploy Azure Function application on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled app services](https://docs.microsoft.com/azure/app-service/overview-arc-integration) deployed on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an App Service plan, a sample Azure Function application that sends messages to an Azure storage account queue, and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled app services.
-
-> **NOTE: Currently, Azure Arc-enabled app services is in preview.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 or above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your currently installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
-  - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
-  - "Security reader" - Required for being able to view Azure Arc-enabled Kubernetes Cloud Defender extension findings
-
-  To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as considering using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
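-
-  For example, to scope the service principal to a single resource group rather than the whole subscription, below is a minimal sketch; it reuses the resource group name from the deployment example later in this scenario, and the group must exist before it can be used as an assignment scope.
-
-   ```shell
-   # Create the resource group first so it can serve as the assignment scope
-   az group create --name Arc-AppSvc-Demo --location "East US"
-   az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" \
-     --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-AppSvc-Demo
-   ```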
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- User is editing the ARM template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc app services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled app services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell script that deploys the AKS cluster and configures the Azure Arc-enabled app services Kubernetes environment on it.
-
- > **NOTE: Notice the AKS cluster will be deployed via the PowerShell script automation.**
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**false**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**true**_.
- - _`deployAPIMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
- - _`adminEmail`_ - an email address that will be used on the Azure API Management deployment to receive all system notifications.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_app_services_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name --location
- az deployment group create \
- --resource-group \
- --name \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json \
- --parameters
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-AppSvc-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AppSvc-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~10-20min**
-
- > **NOTE: Since Azure Arc-enabled app services is [currently in preview](https://docs.microsoft.com/azure/app-service/overview-arc-integration#public-preview-limitations), deployment regions availability is limited to East US and West Europe.**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
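-
-  To check in advance whether a given VM size is currently available in a region, a quick sketch:
-
-   ```shell
-   # A non-empty Restrictions column means the size cannot currently be deployed there
-   az vm list-skus --location eastus --size Standard_D8s_v3 --all --output table
-   ```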
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal.
-
- ![Screenshot showing the ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_function_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_function_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_azure_function_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
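-
-The same inbound rule can also be created with the Azure CLI; a sketch, where the rule name and priority are arbitrary and the resource group placeholder should match your deployment:
-
-   ```shell
-   az network nsg rule create \
-     --resource-group <resource-group> \
-     --nsg-name Arc-App-Client-NSG \
-     --name AllowRDP \
-     --priority 1001 \
-     --direction Inbound \
-     --access Allow \
-     --protocol Tcp \
-     --source-address-prefixes <your-public-IP> \
-     --destination-port-ranges 3389
-   ```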
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure Function application will be deployed on the cluster and be ready to use.
-
-  > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will be restarted and will go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![Screenshot showing PowerShell logon script run](./11.png)
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
- ![Screenshot showing PowerShell logon script run](./22.png)
-
- ![Screenshot showing PowerShell logon script run](./23.png)
-
- ![Screenshot showing PowerShell logon script run](./24.png)
-
- ![Screenshot showing PowerShell logon script run](./25.png)
-
- ![Screenshot showing PowerShell logon script run](./26.png)
-
-  Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the app service plan and the Azure Function application deployed on the cluster will be ready.
-
- ![Screenshot showing desktop wallpaper change](./27.png)
-
-- Since this scenario is deploying both the app service plan and a sample Azure Function application, you will also notice additional, newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled app services are using this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Functions also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for an Azure Function to run.
-
- - [**Azure Function**](https://docs.microsoft.com/azure/azure-functions/functions-overview) - Azure Functions is a serverless solution that allows you to write less code, maintain less infrastructure, and save on costs.
-
-  - [**Application Insights**](https://docs.microsoft.com/azure/azure-monitor/app/app-insights-overview) - Application Insights, a feature of Azure Monitor, is an extensible Application Performance Management (APM) service for developers and DevOps professionals. Use it to monitor your live applications.
-
-  - **Azure Storage Account** - The storage account deployed in this scenario hosts the [queue storage](https://docs.microsoft.com/azure/storage/queues/storage-queues-introduction) where the Azure Function sends its messages, which can later be leveraged in an event-driven application architecture.
-
- ![Screenshot showing additional Azure resources in the resource group](./28.png)
-
-- In this scenario, **a sample Jumpstart Azure Function application** was deployed. To open the deployed Function application in your web browser, simply click the Azure Function resource and then either the created URL or the Browse button.
-
- ![Screenshot showing the Azure Function URL](./29.png)
-
- ![Screenshot showing the Azure Function open in a web browser](./30.png)
-
-- To demonstrate the message queuing element and to show how messages are stored in the queue storage, the Azure Function deployment script also generates 10 sample messages. To view them, click on the newly created storage account and go to the "Queues" section, where you will see the new queue and the stored messages.
-
- ![Screenshot showing the Azure storage account](./31.png)
-
- ![Screenshot showing the Azure storage queue](./32.png)
-
- ![Screenshot showing Azure Function messages in storage queue](./33.png)
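-
-  You can also peek at the queued messages from the command line; a minimal sketch, where the storage account and queue names are placeholders for the ones created by the deployment script:
-
-   ```shell
-   # Peek at up to 10 messages without dequeuing them; authenticate with an
-   # account key or connection string if your session has no data-plane access
-   az storage message peek \
-     --queue-name <queue-name> \
-     --account-name <storage-account-name> \
-     --num-messages 10
-   ```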
-
-- Alternatively, you can view the same queue storage using the Azure Storage Explorer client application installed automatically in the Client VM or using the Azure Storage Browser portal-based view.
-
- ![Screenshot showing Azure Storage Explorer client application storage queue](./34.png)
-
- ![Screenshot showing Azure Storage Explorer portal-based view](./35.png)
-
- ![Screenshot showing Azure Storage Explorer portal-based view storage queue](./36.png)
-
-- To generate your own messages using the Function application, use the Function invoke URL. As part of the deployment script, a _`funcUrl.txt`_ text file that includes the invoke URL was created for you on the Client VM under the _C:\Temp_ folder. Copy the URL and open it in your web browser while adding the message text to it using the _`?name=`_ syntax, for example, _`?name=Bilbo`_.
-
- ![Screenshot showing the funcUrl.txt file](./37.png)
-
- ![Screenshot showing invoke URL](./38.png)
-
- ![Screenshot showing invoke URL in web browser](./39.png)
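-
-  The same request can also be issued from a shell; a sketch, where the placeholder is the invoke URL from _funcUrl.txt_:
-
-   ```shell
-   FUNC_URL="<invoke-URL-from-funcUrl.txt>"
-   # Append the message text using the ?name= syntax
-   curl "${FUNC_URL}?name=Bilbo"
-   ```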
-
-- Go back to the storage queue and see the newly added message.
-
- ![Screenshot showing the new message in the storage queue](./40.png)
-
-## Cluster extensions
-
-In this scenario, the Azure Arc-enabled app services cluster extension was deployed and used to deploy the app services infrastructure.
-
-- In order to view cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes resource](./41.png)
-
- ![Screenshot showing Azure Arc-enabled Kubernetes cluster extensions settings](./42.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing the Delete Azure resource group button](./43.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_container_apps_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_container_apps_arm_template/_index.md
deleted file mode 100644
index d6c892727f..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_container_apps_arm_template/_index.md
+++ /dev/null
@@ -1,293 +0,0 @@
----
-type: docs
-title: "Azure Container Apps ARM Template"
-linkTitle: "Azure Container Apps ARM Template"
-weight: 1
-description: >
----
-
-## Deploy Azure Container Apps on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Container Apps](https://learn.microsoft.com/azure/container-apps/azure-arc-overview) deployed on Azure Arc-enabled [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an Azure Kubernetes Service (AKS) cluster deployed with the Azure Container Apps connected environment, a sample web application, and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Container Apps.
-
-> **NOTE: Currently, Azure Arc-enabled Container Apps is in preview.**
-
-This scenario shows how to run a cloud-native application on [Azure Container Apps](https://azure.microsoft.com/products/container-apps/). The Azure Container Apps service enables you to run microservices and containerized applications on a serverless platform. Individual container apps are deployed to a single Container Apps environment, which acts as a secure boundary around groups of container apps.
-
-In this scenario, you will deploy a [Container Apps environment](https://learn.microsoft.com/azure/container-apps/environment) and three container apps running in Azure. The apps leverage [Dapr](https://learn.microsoft.com/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml) to simplify service-to-service invocation.
-
-- Store - The store app is the store's frontend app, running a [Blazor Server project](https://learn.microsoft.com/dotnet/architecture/blazor-for-web-forms-developers/introduction) that reaches out to the backend APIs.
-- Products API - This API is a [Swagger UI-enabled API](https://swagger.io/tools/swagger-ui/) that hands back product names and IDs to callers.
-- Inventory API - A simple API that provides a random number for a given product ID string. The values of each string/integer pair are stored in a memory cache so they are consistent between API calls.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 or above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your currently installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP)
-
-  To be able to complete the scenario and its related automation, an Azure service principal assigned the “Contributor” role is required. To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcK8s" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcK8s",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as considering using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-- User is editing the ARM template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Container Apps on Azure Arc-enabled Kubernetes will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
-  - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/logAnalytics.json) - Deploys an Azure Log Analytics workspace to view diagnostic information of the Container Apps.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_ContainerAppsLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/ContainerAppsLogonScript.ps1) PowerShell script that deploys the AKS cluster, configures the Azure Container Apps environment, and deploys the Container Apps on the AKS cluster.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deployContainerApps`_ - Boolean that sets whether or not to deploy Azure Container Apps environment and a Web App. For this scenario, we leave it set to _**true**_.
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**false**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**false**_.
- - _`deployAPIMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
- - _`adminEmail`_ - an email address that will be used on the Azure API Management deployment to receive all system notifications.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_app_services_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name --location
- az deployment group create \
- --resource-group \
- --name \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json \
- --parameters
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-AppSvc-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AppSvc-Demo \
- --name arccontainerapps \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~10-20min**
-
- > **NOTE: Since Azure Container App on Azure Arc-enabled Kubernetes is [currently in preview](https://learn.microsoft.com/azure/container-apps/azure-arc-overview#public-preview-limitations), deployment regions availability is limited to East US, East Asia and West Europe.**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal.
-
- ![Screenshot showing the ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_app_service_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_ContainerAppsLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/ARM/artifacts/ContainerAppsLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure web application will be deployed on the cluster and be ready to use.
-
-  > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Container Apps connected environment, the pods will be restarted and will go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the resources created by the Container Apps extension, visit the official [Azure Docs page](https://learn.microsoft.com/azure/container-apps/azure-arc-overview#resources-created-by-the-container-apps-extension).**
-
- ![Screenshot showing PowerShell logon script run](./11.png)
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
-  Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure Container Apps deployed on the cluster will be ready.
-
- ![Screenshot showing desktop wallpaper change](./19.png)
-
-- Since this scenario is deploying Azure Container Apps, you will also notice additional, newly deployed Azure resources in the resource group. The important ones to notice are:
-
-  - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled Kubernetes is used to deploy the Azure Container Apps [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
-  - **Container Apps Connected Environment** - The Container Apps connected environment enables common configuration across apps but is not related to cluster operations. Conceptually, it's deployed into the Custom location resource, and app developers create apps in this environment.
-
- - **Container App** - Azure Container Apps manages the details of Kubernetes and container orchestration for you. Containers in Azure Container Apps can use any runtime, programming language, or development stack of your choice.
-
-  ![Screenshot showing additional Azure resources in the resource group](./20.png)
-
-- In this scenario, **an ASP.NET Core front-end and two back-end APIs on Azure Container Apps** were deployed. To open the deployed web application in your web browser, simply click the Container App resource and then the Application URL.
-
- ![Screenshot showing Store Container App in a resource group](./21.png)
-
- ![Screenshot showing the web application URL](./22.png)
-
- ![Screenshot showing the web application open in a web browser](./23.png)
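-
-  The application URL can also be retrieved with the CLI; a sketch assuming the containerapp Azure CLI extension is installed and that the frontend app is named _store_ (the name used by the log query later in this scenario):
-
-   ```shell
-   # Print the fully qualified domain name of the store container app
-   az containerapp show \
-     --name store \
-     --resource-group <resource-group> \
-     --query properties.configuration.ingress.fqdn \
-     --output tsv
-   ```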
-
-- Optionally, you can review the [Dapr](https://learn.microsoft.com/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml) configuration for the Container apps by navigating to the Dapr setting.
-
- ![Screenshot showing Store Container App Dapr](./24.png)
-
-## Cluster extensions
-
-In this scenario, the Azure Container Apps extension was deployed and used to deploy the Azure Container Apps infrastructure.
-
-- In order to view cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes resource](./25.png)
-
- ![Screenshot showing Azure Arc-enabled Kubernetes cluster extensions settings](./26.png)
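-
-The custom location targeted by the extension can likewise be inspected from the command line; a sketch, assuming the customlocation Azure CLI extension is installed:
-
-   ```shell
-   # List the custom locations in the deployment resource group
-   az customlocation list --resource-group <resource-group> --output table
-   ```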
-
-## View your application's diagnostics
-
-In this scenario, a Log Analytics workspace was configured with the Container Apps extension to view diagnostic information.
-
-- Navigate to the Log Analytics workspace that's configured with your Container Apps extension.
-
- ![Screenshot showing the Log Analytics workspace in the resource group](./27.png)
-
-- Select Logs in the left navigation, and run the below query.
-
-   ```kusto
- let StartTime = ago(72h);
- let EndTime = now();
- ContainerAppConsoleLogs_CL
- | where TimeGenerated between (StartTime .. EndTime)
- | where ContainerAppName_s =~ "store"
- ```
-
-  ![Screenshot showing the Log Analytics workspace logs](./28.png)
-
-  ![Screenshot showing the Log Analytics workspace logs query](./29.png)
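-
-  The same query can also be run from the command line; a sketch assuming the log-analytics Azure CLI extension is installed, with a placeholder for the workspace customer ID:
-
-   ```shell
-   az monitor log-analytics query \
-     --workspace <workspace-customer-id> \
-     --analytics-query "ContainerAppConsoleLogs_CL | where ContainerAppName_s =~ 'store' | take 10" \
-     --output table
-   ```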
-
- > **NOTE: If there's an error when running a query, try again in 10-15 minutes. There may be a delay for Log Analytics to start receiving logs from the application.**
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing the Delete Azure resource group button](./30.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_logic_app_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_logic_app_arm_template/_index.md
deleted file mode 100644
index e81bda07dc..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_logic_app_arm_template/_index.md
+++ /dev/null
@@ -1,339 +0,0 @@
----
-type: docs
-title: "Azure Logic App ARM Template"
-linkTitle: "Azure Logic App ARM Template"
-weight: 4
-description: >
----
-
-## Deploy Azure Logic App on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled app services](https://docs.microsoft.com/azure/app-service/overview-arc-integration) deployed on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an App Service plan, a sample Azure Logic App that reads messages from an Azure storage account queue and creates blobs in an Azure storage account container, and a Microsoft Windows Server 2022 (Datacenter) Azure VM installed & pre-configured with all the required tools needed to work with Azure Arc-enabled app services.
-
-> **NOTE: Currently, Azure Arc-enabled app services is in preview.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 or above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your currently installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
-  - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
-  - "Security reader" - Required for being able to view Azure Arc-enabled Kubernetes Cloud Defender extension findings
-
-  To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as considering using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- User is editing the ARM template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled app services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/artifacts/AppServicesLogonScript.ps1) PowerShell script that deploys the AKS cluster and configures the Azure Arc-enabled app services Kubernetes environment on it.
-
- > **NOTE: Notice the AKS cluster will be deployed via the PowerShell script automation.**
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`kubernetesVersion`_ - AKS version
- - _`dnsPrefix`_ - AKS unique DNS prefix
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**false**_.
- - _`deployAPIMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**false**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**true**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_app_services_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name --location
- az deployment group create \
- --resource-group \
- --name \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.json \
- --parameters
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-AppSvc-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AppSvc-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/aks/arm_template/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~5-10min**
-
- > **NOTE: Since Azure Arc-enabled app services is [currently in preview](https://docs.microsoft.com/azure/app-service/overview-arc-integration#public-preview-limitations), deployment regions availability is limited to East US and West Europe.**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **7 Azure resources** deployed.
-
- ![ARM template deployment completed](./01.png)
-
- ![New Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_logic_app_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_logic_app_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/aks/aks_logic_app_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will apply the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/aks/arm_template/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session, as this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Logic App will be deployed on the cluster, ready to use.
-
-  > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will be restarted and will go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![PowerShell logon script run](./11.png)
-
- ![PowerShell logon script run](./12.png)
-
- ![PowerShell logon script run](./13.png)
-
- ![PowerShell logon script run](./14.png)
-
- ![PowerShell logon script run](./15.png)
-
- ![PowerShell logon script run](./16.png)
-
- ![PowerShell logon script run](./17.png)
-
- ![PowerShell logon script run](./18.png)
-
- ![PowerShell logon script run](./19.png)
-
- ![PowerShell logon script run](./20.png)
-
- ![PowerShell logon script run](./21.png)
-
- ![PowerShell logon script run](./22.png)
-
- ![PowerShell logon script run](./23.png)
-
- ![PowerShell logon script run](./24.png)
-
- ![PowerShell logon script run](./25.png)
-
- ![PowerShell logon script run](./26.png)
-
- ![PowerShell logon script run](./27.png)
-
- ![PowerShell logon script run](./28.png)
-
- ![PowerShell logon script run](./29.png)
-
- ![PowerShell logon script run](./30.png)
-
- ![PowerShell logon script run](./31.png)
-
- Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the app service plan and the Azure Logic App deployed on the cluster will be ready.
-
- ![Wallpaper change](./32.png)
-
-- Since this scenario is deploying both the app service plan and a sample Logic App application, you will also notice additional, newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled app services are using this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Logic Apps also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for an Azure Logic App to run.
-
- - [**Azure Logic App**](https://docs.microsoft.com/azure/logic-apps/logic-apps-overview) - Azure Logic Apps is a cloud-based platform for creating and running automated workflows that integrate your apps, data, services, and systems.
-
-  - [**API Connection**](https://docs.microsoft.com/azure/connectors/apis-list) - A connector provides prebuilt operations that you can use as steps in your Logic Apps workflows.
-
-  - [**Application Insights**](https://docs.microsoft.com/azure/azure-monitor/app/app-insights-overview) - Application Insights, a feature of Azure Monitor, is an extensible Application Performance Management (APM) service for developers and DevOps professionals. Use it to monitor your live applications.
-
-  - **Azure Storage Account** - The storage account deployed in this scenario is used for hosting the [queue storage](https://docs.microsoft.com/azure/storage/queues/storage-queues-introduction) and [blob storage](https://docs.microsoft.com/azure/storage/blobs/storage-blobs-introduction) where the Azure Logic App will be creating files in response to messages in a queue.
-
- ![Additional Azure resources in the resource group](./33.png)
-
-- In this scenario, **a sample Jumpstart Azure Logic App** was deployed. To view the deployed Logic App, simply click the Azure Logic App resource.
-
- ![Azure Logic App resource](./34.png)
-
-- You can view the Logic App workflow by clicking on "Workflows" and then clicking the workflow name "CreateBlobFromQueueMessage".
-
- ![Azure Logic App detail](./35.png)
-
- ![Azure Logic App detail](./36.png)
-
-- To demonstrate the message queuing element and to show how blobs are created when messages are read from the queue storage, the Azure Logic App deployment script also generates 10 sample queue messages. To view the queue, click on the newly created storage account and go to the "Queues" section, where you will see the queue named "jumpstart-queue". Note that the queue will be empty because the workflow automatically deletes messages from the queue after creating a new blob in the "jumpstart-blobs" container.
-
- ![Azure storage account](./37.png)
-
- ![Azure storage queue](./38.png)
-
-- Go back to the Azure storage account and click on Containers. From here you will see the "jumpstart-blobs" container. Open this container and view the blobs that were created by the Logic App.
-
- ![Azure storage container](./39.png)
-
- ![Azure storage blobs](./40.png)
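-
-- The same blobs can be listed from the Azure CLI; a minimal sketch where the storage account name is a placeholder (it is generated during deployment), while the container name comes from this scenario.
-
-  ```shell
-  # List the blobs the Logic App created in the jumpstart-blobs container
-  az storage blob list \
-    --account-name <storage-account-name> \
-    --container-name jumpstart-blobs \
-    --query "[].name" \
-    --output table \
-    --auth-mode login
-  ```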
-
-- Alternatively, you can view the storage details using the Azure Storage Explorer client application installed automatically in the Client VM or using the Azure Storage Explorer portal-based view.
-
- ![Azure Storage Explorer client application storage queue](./41.png)
-
- ![Azure Storage Explorer portal-based view](./42.png)
-
- ![Azure Storage Explorer portal-based view storage queue](./43.png)
-
-- To generate your own blobs using the Logic App, create a new message in the queue by using Azure Storage Explorer and clicking Add Message as shown below.
-
- ![Add queue message](./44.png)
-
- ![Add queue message](./45.png)
-
- ![Add queue message](./46.png)
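-
-- If you prefer the CLI over Azure Storage Explorer, a message can also be queued with a sketch like the below; the storage account name and message text are placeholders, while the queue name comes from this scenario.
-
-  ```shell
-  # Put a message on jumpstart-queue; the Logic App workflow will pick it up and create a blob
-  az storage message put \
-    --account-name <storage-account-name> \
-    --queue-name jumpstart-queue \
-    --content "<your-message-text>" \
-    --auth-mode login
-  ```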
-
-- Go back to the storage container and see the newly added blob that was created automatically by the Logic App.
-
- ![New message in storage queue](./47.png)
-
-- As part of the deployment, an Application Insights instance was also provisioned to provide you with relevant performance and application telemetry.
-
- ![Application Insights instance](./48.png)
-
-## Cluster extensions
-
-In this scenario, the Azure Arc-enabled app services cluster extension was deployed and used to provision the app services infrastructure.
-
-- In order to view cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Azure Arc-enabled Kubernetes resource](./49.png)
-
- ![Azure Arc-enabled Kubernetes cluster extensions settings](./50.png)
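-
-- The installed extensions can also be listed from the Azure CLI. A minimal sketch, assuming placeholder names for the connected cluster and its resource group:
-
-  ```shell
-  # Requires the Azure CLI "k8s-extension" extension; lists extensions on the Arc-enabled cluster
-  az k8s-extension list \
-    --cluster-type connectedClusters \
-    --cluster-name <arc-cluster-name> \
-    --resource-group <resource-group-name> \
-    --output table
-  ```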
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Delete Azure resource group](./51.png)
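-
-- The same cleanup can be done from the Azure CLI; the resource group name is whatever you chose at deployment time.
-
-  ```shell
-  # Delete the resource group and everything in it, without prompting and without waiting
-  az group delete --name <resource-group-name> --yes --no-wait
-  ```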
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/_index.md
deleted file mode 100644
index bd1dba45f8..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Kubernetes Cluster API"
-linkTitle: "Kubernetes Cluster API"
-weight: 2
-description: >-
-  If you do not yet have a Kubernetes cluster, the scenarios in this section will guide you through deploying Azure Arc-enabled app services on Cluster API (CAPI) Kubernetes in an automated fashion using ARM templates.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apimgmt_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apimgmt_arm_template/_index.md
deleted file mode 100644
index 43171c97e1..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apimgmt_arm_template/_index.md
+++ /dev/null
@@ -1,319 +0,0 @@
----
-type: docs
-title: "Azure API Management Gateway ARM Template"
-linkTitle: "Azure API Management Gateway ARM Template"
-weight: 3
-description: >
----
-
-## Deploy an Azure API Management gateway on Cluster API (CAPI) using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [a self-hosted Azure API Management Gateway](https://docs.microsoft.com/azure/api-management/how-to-deploy-self-hosted-gateway-azure-arc) deployed on a [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/introduction.html) Kubernetes cluster with its [Cluster API Azure provider (CAPZ)](https://cloudblogs.microsoft.com/opensource/2020/12/15/introducing-cluster-api-provider-azure-capz-kubernetes-cluster-management/) using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have a CAPI Kubernetes cluster deployed with an Azure API Management gateway, a backend API and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with the Azure API Management gateway.
-
-> **NOTE: Currently, API Management self-hosted gateway on Azure Arc is in preview. The deployment time for this scenario can take ~60-90 minutes**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now come with a built-in SSH client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the Cluster API Book docs:
-
-"Cluster API requires an existing Kubernetes cluster accessible via kubectl; during the installation process the Kubernetes cluster will be transformed into a management cluster by installing the Cluster API provider components, so it is recommended to keep it separated from any application workload."
-
-In this scenario, as part of the automation flow (described below), a [Rancher K3s](https://rancher.com/docs/k3s/latest/en/) cluster will be deployed and used as the management cluster. This cluster will then be used to deploy the workload cluster using the Cluster API Azure provider (CAPZ).
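-
-For reference only: turning an existing cluster into a CAPI management cluster is typically a single `clusterctl` call, as in the sketch below. The automation in this scenario performs the equivalent steps for you, so you do not need to run it yourself.
-
-  ```shell
-  # Initialize the current kubectl context (the K3s cluster) as a Cluster API
-  # management cluster with the Azure infrastructure provider (CAPZ) installed
-  clusterctl init --infrastructure azure
-  ```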
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, review the explanation below.
-
-- The user edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- The main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/azuredeploy.json) initiates the deployment of the linked ARM templates:
-
- - [_ubuntuCapi_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/ubuntuCapi.json) - Deploys an Ubuntu Linux VM which will have Rancher K3s installed and transformed into a Cluster API management cluster via the Azure CAPZ provider.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled app services logs uploads.
-
-- The user remotes into the client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/artifacts/AppServicesLogonScript.ps1) PowerShell script that configures the Azure Arc-enabled app services Kubernetes environment on the CAPI cluster.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**false**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
-  - _`deployApiMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**true**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
-  - _`adminEmail`_ - Your email address; it will be used to notify you once the API Management deployment is done.
-  - _`deployBastion`_ - Choice (true | false) of whether to deploy Azure Bastion for connecting to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/azuredeploy.json \
-  --parameters <azuredeploy-parameters-file>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-API-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-API-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~60-90 minutes**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **34 various Azure resources** deployed.
-
- ![ARM template deployment completed](./01.png)
-
- ![New Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to the _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apimgmt_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apimgmt_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apimgmt_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will apply the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session, as this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the API Management gateway will be deployed on the cluster and be ready to use.
-
-  > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will be restarted and will go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![PowerShell logon script run](./11.png)
-
- ![PowerShell logon script run](./12.png)
-
- ![PowerShell logon script run](./13.png)
-
- ![PowerShell logon script run](./14.png)
-
- ![PowerShell logon script run](./15.png)
-
- ![PowerShell logon script run](./16.png)
-
-  Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the API Management gateway and the sample API will be configured on the cluster.
-
- ![Wallpaper change](./17.png)
-
-- Since this scenario is deploying both the app service plan and a sample web application, you will also notice additional, newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled app services are using this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Functions also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for a web app to run.
-
- - [**App Service**](https://docs.microsoft.com/azure/app-service/overview) - Azure App Service is an HTTP-based service for hosting web applications, REST APIs, and mobile back ends.
-
- ![Additional Azure resources in the resource group](./18.png)
-
-## API Management self-hosted gateway
-
-In this scenario, the Azure Arc-enabled API Management cluster extension was deployed and used to provision the self-hosted API Management gateway infrastructure.
-
-- In order to view cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Azure Arc-enabled Kubernetes resource](./19.png)
-
- ![Azure Arc-enabled Kubernetes cluster extensions settings](./20.png)
-
-Deploying the API Management gateway extension to an Azure Arc-enabled Kubernetes cluster creates an Azure API Management self-hosted gateway. You can verify this from the portal by going to the resource group and selecting the API Management service.
-
- ![API management service](./21.png)
-
-Select Gateways under the Deployment + infrastructure section.
-
- ![Self-hosted Gateway](./22.png)
-
-A self-hosted gateway should be deployed with one connected node.
-
- ![Connected node on self-hosted gateway](./23.png)
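-
-You can also confirm the gateway is running from the cluster side. A small sketch, assuming the extension was installed into the _apimgmt_ namespace used elsewhere in this scenario:
-
-  ```shell
-  # The self-hosted gateway runs as pods in the apimgmt namespace
-  kubectl get pods -n apimgmt
-  ```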
-
-In this scenario, a sample Demo Conference API was deployed. To view the deployed API, simply click on the self-hosted gateway resource and select APIs.
-
- ![Demo Conference API](./24.png)
-
-To demonstrate that the self-hosted gateway is processing API requests, you need to identify two elements:
-
-- The public IP address of the self-hosted gateway, obtained by running the command below from the client VM.
-
- ```powershell
- kubectl get svc -n apimgmt
- ```
-
- ![Self-hosted gateway public IP](./25.png)
-
-- The API Management subscription key. From the Azure portal, on the API Management service resource, select Subscriptions under APIs and select Show/hide keys for the subscription with the display name "Built-in all-access subscription".
-
- ![Self-hosted gateway subscriptions](./26.png)
-
- ![Subscription key](./27.png)
-
-Once you have obtained these two parameters, replace them in the following code snippet and run it from a PowerShell session on the client VM.
-
-  ```powershell
-  # Replace the placeholders with the public IP and subscription key obtained above
-  $publicip = "<self-hosted gateway public IP>"
-  $subscription = "<API Management subscription key>"
-
-  # The sample Demo Conference API is exposed on port 5000 of the self-hosted gateway
-  $url = "http://$($publicip):5000/conference/topics"
-  $headers = @{
-      'Ocp-Apim-Subscription-Key' = $subscription
-      'Ocp-Apim-Trace' = 'true'
-  }
-
-  # Send ten requests through the self-hosted gateway to generate traffic
-  $i = 1
-  While ($i -le 10)
-  {
-      Invoke-RestMethod -URI $url -Headers $headers
-      $i++
-  }
-  ```
-
- ![API calls test](./28.png)
-
-On the Overview page of the API Management service, you can now see the API requests processed by the self-hosted gateway.
-
- ![API requests metrics](./29.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Delete Azure resource group](./30.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apps_service_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apps_service_arm_template/_index.md
deleted file mode 100644
index 83d74d5f48..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apps_service_arm_template/_index.md
+++ /dev/null
@@ -1,281 +0,0 @@
----
-type: docs
-title: "App Service (Container) ARM Template"
-linkTitle: "App Service (Container) ARM Template"
-weight: 1
-description: >
----
-
-## Deploy an App Service app using a custom container on Cluster API using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled app services](https://docs.microsoft.com/azure/app-service/overview-arc-integration) deployed on a [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/introduction.html) Kubernetes cluster with its [Cluster API Azure provider (CAPZ)](https://cloudblogs.microsoft.com/opensource/2020/12/15/introducing-cluster-api-provider-azure-capz-kubernetes-cluster-management/) using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have a CAPI Kubernetes cluster deployed with an App Service plan, a sample Web Application (Web App) and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled app services.
-
-> **NOTE: Currently, Azure Arc-enabled app services is in preview.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now come with a built-in SSH client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the Cluster API Book docs:
-
-"Cluster API requires an existing Kubernetes cluster accessible via kubectl; during the installation process the Kubernetes cluster will be transformed into a management cluster by installing the Cluster API provider components, so it is recommended to keep it separated from any application workload."
-
-In this scenario, as part of the automation flow (described below), a [Rancher K3s](https://rancher.com/docs/k3s/latest/en/) cluster will be deployed and used as the management cluster. This cluster will then be used to deploy the workload cluster using the Cluster API Azure provider (CAPZ).
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, review the explanation below.
-
-- The user edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- The main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json) initiates the deployment of the linked ARM templates:
-
- - [_ubuntuCapi_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/ubuntuCapi.json) - Deploys an Ubuntu Linux VM which will have Rancher K3s installed and transformed into a Cluster API management cluster via the Azure CAPZ provider.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled app services logs uploads.
-
-- The user remotes into the client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell script that configures the Azure Arc-enabled app services Kubernetes environment on the CAPI cluster.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deployAppService`_ - Boolean that sets whether or not to deploy App Service plan and a Web App. For this scenario, we leave it set to _**true**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Function application. For this scenario, we leave it set to _**false**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
-  - _`deployApiMgmt`_ - Boolean that sets whether or not to deploy a self-hosted Azure API Management gateway. For this scenario, we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
-  - _`deployBastion`_ - Choice (true | false) of whether to deploy Azure Bastion for connecting to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
-  --parameters <azuredeploy-parameters-file>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-AppSvc-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AppSvc-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-25min**
-
- > **NOTE: Since Azure Arc-enabled app services is [currently in preview](https://docs.microsoft.com/azure/app-service/overview-arc-integration#public-preview-limitations), deployment regions availability is limited to East US and West Europe.**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. This part of the automation also onboards the cluster as an Azure Arc-enabled Kubernetes resource. Azure Arc-enabled app services use this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations), which will be deployed later, in the next phase of the scenario automation.
-
- ![Screenshot showing the ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to the _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apps_service_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apps_service_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/apps_service_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will apply the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session, as this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure web application will be deployed on the cluster and be ready to use.
-
-  > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will be restarted and will go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![Screenshot showing PowerShell logon script run](./11.png)
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
- ![Screenshot showing PowerShell logon script run](./22.png)
-
-  Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the app service plan and the sample web application deployed on the cluster will be ready.
-
- ![Screenshot showing desktop wallpaper change](./23.png)
-
-- Since this scenario is deploying both the app service plan and a sample web application, you will also notice additional, newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Functions also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for a web app to run.
-
- - [**App Service**](https://docs.microsoft.com/azure/app-service/overview) - Azure App Service is an HTTP-based service for hosting web applications, REST APIs, and mobile back ends.
-
-  ![Screenshot showing additional Azure resources in the resource group](./24.png)
-
-- In this scenario, a sample Jumpstart web application running as a **custom, Linux-based Docker container** was deployed. To open the deployed web application in your web browser, simply click the App Service resource and then the Browse button.
-
- ![Screenshot showing App Service resource in a resource group](./25.png)
-
- ![Screenshot showing the web application URL](./26.png)
-
- ![Screenshot showing the web application open in a web browser](./27.png)
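-
-- The same app can be opened from the Azure CLI with a sketch like the below; the web app name and resource group are placeholders, since the name is generated by the deployment.
-
-  ```shell
-  # Open the web app's default URL in your browser
-  az webapp browse --name <web-app-name> --resource-group <resource-group-name>
-  ```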
-
-## Cluster extensions
-
-In this scenario, the Azure Arc-enabled app services cluster extension was deployed and used to provision the app services infrastructure. In addition, the Azure Monitor for Containers, Microsoft Cloud Defender and the Azure Policy extensions were also installed on the cluster.
-
-- In order to view cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes resource](./28.png)
-
- ![Screenshot showing Azure Arc-enabled Kubernetes cluster extensions settings](./29.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing the Delete Azure resource group button](./30.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/azure_function_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/azure_function_arm_template/_index.md
deleted file mode 100644
index e027bc4e8c..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/azure_function_arm_template/_index.md
+++ /dev/null
@@ -1,319 +0,0 @@
----
-type: docs
-title: "Azure Function ARM Template"
-linkTitle: "Azure Function ARM Template"
-weight: 2
-description: >
----
-
-## Deploy an Azure Function application on Cluster API using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled app services](https://docs.microsoft.com/azure/app-service/overview-arc-integration) deployed on a [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/introduction.html) Kubernetes cluster with its [Cluster API Azure provider (CAPZ)](https://cloudblogs.microsoft.com/opensource/2020/12/15/introducing-cluster-api-provider-azure-capz-kubernetes-cluster-management/) using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have a CAPI Kubernetes cluster deployed with an App Service plan, a sample Azure Function application that sends messages to an Azure storage account queue, and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled app services.
-
-> **NOTE: Currently, Azure Arc-enabled app services is in preview.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now come with a built-in SSH client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the Cluster API Book docs:
-
-"Cluster API requires an existing Kubernetes cluster accessible via kubectl; during the installation process the Kubernetes cluster will be transformed into a management cluster by installing the Cluster API provider components, so it is recommended to keep it separated from any application workload."
-
-In this scenario, as part of the automation flow (described below), a [Rancher K3s](https://rancher.com/docs/k3s/latest/en/) cluster will be deployed and used as the management cluster. This management cluster will then be used to deploy the workload cluster using the Cluster API Azure provider (CAPZ).
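-
-As a point of reference, the transformation into a management cluster is typically driven by the `clusterctl` CLI. A minimal sketch (the scenario automation performs this step for you):
-
- ```shell
- # Runs against the cluster kubectl currently points to; installs the core
- # Cluster API components plus the Azure (CAPZ) infrastructure provider
- clusterctl init --infrastructure azure
- ```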
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_ubuntuCapi_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/ubuntuCapi.json) - Deploys an Ubuntu Linux VM which will have Rancher K3s installed and transformed into a Cluster API management cluster via the Azure CAPZ provider.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled app services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell script that configures the Azure Arc-enabled app services Kubernetes environment on the CAPI cluster.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deployAppService`_ - Boolean that sets whether or not to deploy an App Service plan and a Web App. For this scenario, we leave it set to _**false**_.
- - _`deployFunction`_ - Boolean that sets whether or not to deploy an App Service plan and an Azure Function application. For this scenario, we leave it set to _**true**_.
- - _`deployLogicApp`_ - Boolean that sets whether or not to deploy an App Service plan and an Azure Logic App. For this scenario, we leave it set to _**false**_.
- - _`deployApiMgmt`_ - Boolean that sets whether or not to deploy an Azure API Management instance. For this scenario, we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well.
- - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/arm_template) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/app_svc_capi/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-AppSvc-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AppSvc-Demo \
- --name arcappsvc \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/app_svc_capi/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-25min**
-
- > **NOTE: Since Azure Arc-enabled app services is [currently in preview](https://docs.microsoft.com/azure/app-service/overview-arc-integration#public-preview-limitations), deployment regions availability is limited to East US and West Europe.**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. This part of the automation also onboards the cluster as an Azure Arc-enabled Kubernetes resource. Azure Arc-enabled app services use this resource to deploy the app services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations), which will be deployed later, in the next phase of the scenario automation.
-
- ![Screenshot showing the ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
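-
- If you would like to confirm the onboarding from the command line as well, a quick check could look like the below sketch (the cluster name is a placeholder and the `connectedk8s` Azure CLI extension is required):
-
- ```shell
- az connectedk8s show \
-   --name <arc-cluster-name> \
-   --resource-group Arc-AppSvc-Demo \
-   --query provisioningState \
-   --output tsv
- ```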
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-App-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/azure_function_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-App-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/azure_function_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_app_svc/cluster_api/capi_azure/azure_function_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
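-
-If you prefer to script this step, the same rule can be created with the Azure CLI. A minimal sketch, reusing the example resource group name from the deployment section and substituting your own public IP address:
-
- ```shell
- # Illustrative values: allow inbound RDP (3389) from your public IP only
- az network nsg rule create \
-   --resource-group Arc-AppSvc-Demo \
-   --nsg-name Arc-App-Client-NSG \
-   --name AllowRDP \
-   --priority 1001 \
-   --direction Inbound \
-   --access Allow \
-   --protocol Tcp \
-   --destination-port-ranges 3389 \
-   --source-address-prefixes <your-public-ip>
- ```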
-
-- Open the _Arc-App-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-App-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_AppServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_app_services_jumpstart/cluster_api/capi_azure/ARM/artifacts/AppServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. When the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure web application will be deployed on the cluster and be ready to use.
-
- > **NOTE: As you will notice from the screenshots below, during the deployment of the Azure Arc-enabled app services environment, the _log-processor_ service pods will be restarted and will go through multiple Kubernetes pod lifecycle stages. This is normal and can safely be ignored. To learn more about the various Azure Arc-enabled app services Kubernetes components, visit the official [Azure Docs page](https://docs.microsoft.com/azure/app-service/overview-arc-integration#pods-created-by-the-app-service-extension).**
-
- ![Screenshot showing PowerShell logon script run](./11.png)
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
- ![Screenshot showing PowerShell logon script run](./22.png)
-
- ![Screenshot showing PowerShell logon script run](./23.png)
-
- ![Screenshot showing PowerShell logon script run](./24.png)
-
- ![Screenshot showing PowerShell logon script run](./25.png)
-
- ![Screenshot showing PowerShell logon script run](./26.png)
-
- ![Screenshot showing PowerShell logon script run](./27.png)
-
- Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the App Service plan and the sample web application deployed on the cluster will be ready.
-
- ![Screenshot showing desktop wallpaper change](./28.png)
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - [**App Service Kubernetes Environment**](https://docs.microsoft.com/azure/app-service/overview-arc-integration#app-service-kubernetes-environment) - The App Service Kubernetes environment resource is required before apps may be created. It enables configuration common to apps in the custom location, such as the default DNS suffix.
-
- - [**App Service plan**](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) - In App Service (Web Apps, API Apps, or Mobile Apps), an app always runs in an App Service plan. In addition, Azure Functions also has the option of running in an App Service plan. An App Service plan defines a set of compute resources for an Azure Function to run.
-
- - [**Azure Function**](https://docs.microsoft.com/azure/azure-functions/functions-overview) - Azure Functions is a serverless solution that allows you to write less code, maintain less infrastructure, and save on costs.
-
- - [Application Insights](https://docs.microsoft.com/azure/azure-monitor/app/app-insights-overview) - Application Insights, a feature of Azure Monitor, is an extensible Application Performance Management (APM) service for developers and DevOps professionals. Use it to monitor your live applications.
-
- - Azure Storage Account - The storage account deployed in this scenario is used for hosting the [queue storage](https://docs.microsoft.com/azure/storage/queues/storage-queues-introduction) to which the Azure Function will send messages, and which can be leveraged later in an event-driven application architecture.
-
- ![Screenshot showing additional Azure resources in the resource group](./29.png)
-
-- In this scenario, **a sample Jumpstart Azure Function application** was deployed. To open the deployed Function application in your web browser, simply click the Azure Function resource and then click the created URL or the Browse button.
-
- ![Screenshot showing the Azure Function URL](./30.png)
-
- ![Screenshot showing the Azure Function open in a web browser](./31.png)
-
-- To demonstrate the message queuing element and to show how messages are stored in the queue storage, the Azure Function deployment script also generates 10 sample messages. To view them, click on the newly created storage account and go to the "Queues" section, where you will see the new queue and the stored messages.
-
- ![Screenshot showing the Azure storage account](./32.png)
-
- ![Screenshot showing the Azure storage queue](./33.png)
-
- ![Screenshot showing Azure Function messages in storage queue](./34.png)
-
-- Alternatively, you can view the same queue storage using the Azure Storage Explorer client application installed automatically in the Client VM or using the Azure Storage Browser portal-based view.
-
- ![Screenshot showing Azure Storage Explorer client application storage queue](./35.png)
-
- ![Screenshot showing Azure Storage Explorer portal-based view](./36.png)
-
- ![Screenshot showing Azure Storage Explorer portal-based view storage queue](./37.png)
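-
- You can also peek at the queue from the command line. A minimal sketch, with placeholder names for the storage account and queue created by the deployment:
-
- ```shell
- storageAccount="<deployment-storage-account>"
- accountKey=$(az storage account keys list --account-name $storageAccount \
-   --resource-group Arc-AppSvc-Demo --query "[0].value" --output tsv)
- az storage message peek --queue-name "<queue-name>" --num-messages 5 \
-   --account-name $storageAccount --account-key $accountKey
- ```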
-
-- To generate your own messages using the Function application, use the Function invoke URL. As part of the deployment script, a _`funcUrl.txt`_ text file that includes the invoke URL was created for you on the Client VM, under the _C:\Temp_ folder. Copy the URL and open it in your web browser while adding the message text to it using the _`?name=`_ syntax, for example, _`?name=Bilbo`_.
-
- ![Screenshot showing the funcUrl.txt file](./38.png)
-
- ![Screenshot showing invoke URL](./39.png)
-
- ![Screenshot showing invoke URL in web browser](./40.png)
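-
- From a terminal, the same invocation could look like the below sketch (the URL shown is hypothetical; copy the real one from _`funcUrl.txt`_):
-
- ```shell
- curl "https://<your-function-app-url>/api/<function-name>?name=Bilbo"
- ```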
-
-- Go back to the storage queue and see the newly added message.
-
- ![Screenshot showing the new message in the storage queue](./41.png)
-
-## Cluster extensions
-
-In this scenario, the Azure Arc-enabled app services cluster extension was deployed and used to deploy the app services infrastructure. In addition, the Azure Monitor for Containers, Microsoft Cloud Defender, and Azure Policy extensions were also installed on the cluster.
-
-- In order to view cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes resource](./42.png)
-
- ![Screenshot showing Azure Arc-enabled Kubernetes cluster extensions settings](./43.png)
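-
-Alternatively, the installed extensions can be listed with the Azure CLI. A sketch, with a placeholder cluster name (requires the `k8s-extension` Azure CLI extension):
-
- ```shell
- az k8s-extension list \
-   --cluster-name <arc-cluster-name> \
-   --resource-group Arc-AppSvc-Demo \
-   --cluster-type connectedClusters \
-   --output table
- ```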
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Delete Azure resource group](./44.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/_index.md
deleted file mode 100644
index 9141b8093e..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Arc-enabled data services"
-linkTitle: "Azure Arc-enabled data services"
-weight: 5
-description: >-
- The deployment scenarios in this section will guide you through deploying and working with Azure Arc-enabled data services on multiple infrastructure platforms.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aks/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aks/_index.md
deleted file mode 100644
index c374572e7d..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aks/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Kubernetes Service"
-linkTitle: "Azure Kubernetes Service"
-weight: 1
-description: >-
- If you do not yet have a Kubernetes cluster, the scenarios in this section will guide you through creating an AKS cluster with Azure Arc-enabled data services integration in an automated fashion using ARM templates.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_dc_vanilla_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_dc_vanilla_arm_template/_index.md
deleted file mode 100644
index e711c02c09..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_dc_vanilla_arm_template/_index.md
+++ /dev/null
@@ -1,282 +0,0 @@
----
-type: docs
-title: "Data Controller ARM Template"
-linkTitle: "Data Controller ARM Template"
-weight: 1
-description: >
----
-
-## Deploy a vanilla Azure Arc Data Controller in a directly connected mode on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) deployed on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed and pre-configured with all the required tools to work with Azure Arc Data Services:
-
-![Screenshot showing the deployed architecture](./diagram.png)
-
-> **NOTE: Currently, Azure Arc-enabled PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now come with a built-in SSH client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
- - "Security reader" - Required for viewing the Azure Arc-enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for Azure Arc-enabled data services billing, monitoring metrics upload, and logs management
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcAppSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcAppSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with ease of use in mind while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices).**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the AKS cluster, including the data controller.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this data controller vanilla scenario, we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. For this data controller vanilla scenario, we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this data controller vanilla scenario, we leave it set to _**false**_.
- - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 Azure resources** deployed.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_dc_vanilla_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_dc_vanilla_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_dc_vanilla_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. When the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster and be ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the post-run desktop](./29.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **11 Azure resources** deployed). The important ones to notice are:
-
- - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as to use Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./30.png)
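-
- On the cluster side, you can optionally verify the data controller with `kubectl`. A sketch, assuming the automation deployed the data controller to the `arc` namespace:
-
- ```shell
- # List the data controller custom resource and its pods
- kubectl get datacontrollers -n arc
- kubectl get pods -n arc
- ```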
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./31.png)
-
- ![Screenshot showing Azure Data Studio extensions](./32.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./33.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./34.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./35.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/_index.md
deleted file mode 100644
index d4f0b50591..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/_index.md
+++ /dev/null
@@ -1,368 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance ARM Template"
-linkTitle: "SQL Managed Instance ARM Template"
-weight: 2
-description: >
----
-
-## Deploy Azure SQL Managed Instance in directly connected mode on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an Azure Arc Data Controller, SQL Managed Instance, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed and pre-configured with all the required tools to work with Azure Arc-enabled data services:
-
-![Screenshot showing the deployed architecture](./diagram.png)
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now come with a built-in SSH client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
- - "Security reader" - Required for viewing the Azure Arc-enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for Azure Arc-enabled data services billing, monitoring metrics upload, and logs management
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with ease of use in mind while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices).**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the AKS cluster, including the data controller and SQL Managed Instance.
-
-- In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this Azure Arc-enabled SQL Managed Instance scenario, we will set it to _**true**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. Set this to either _**true**_ or _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this scenario, we leave it set to _**false**_.
- - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
- > **Note:** In case you decided to deploy SQL Managed Instance in a highly available fashion, refer to the ["Perform database failover with SQL Managed Instance Availability Groups"](../../day2/aks/aks_mssql_ha/_index.md) Jumpstart scenario as well as the ["High Availability with Azure Arc-enabled SQL Managed Instance"](https://learn.microsoft.com/azure/azure-arc/data/managed-instance-high-availability) product documentation.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 Azure resources** deployed (if you chose to deploy Azure Bastion, you will have **9 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. When the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and SQL Managed Instance will be deployed on the cluster and be ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the PowerShell logon script run](./33.png)
-
- ![Screenshot showing the post-run desktop](./34.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **12 Azure resources** deployed). The important ones to notice are:
-
- - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as to use Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- - _Azure Arc-enabled SQL Managed Instance_ - The SQL Managed Instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./35.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./36.png)
-
- ![Screenshot showing Azure Data Studio extensions](./37.png)
-
-- Additionally, the SQL Managed Instance connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio SQL MI connection](./38.png)
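-
- Outside of Azure Data Studio, you could also query the restored database with `sqlcmd`. A sketch with placeholder connection values (the _SQLMI Endpoints_ desktop text file described in the "Operations" section below contains the endpoint details); the database name assumes the AdventureWorks2019 sample:
-
- ```shell
- sqlcmd -S <primary-endpoint-ip>,1433 -U <username> -P '<password>' \
-   -d AdventureWorks2019 -Q "SELECT TOP (5) FirstName, LastName FROM Person.Person;"
- ```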
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./39.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./40.png)
-
-## High Availability with SQL Always-On availability groups
-
-Azure Arc-enabled SQL Managed Instance is deployed on Kubernetes as a containerized application and uses Kubernetes constructs such as stateful sets and persistent storage to provide built-in health monitoring, failure detection, and failover mechanisms to maintain service health. For increased reliability, you can also configure Azure Arc-enabled SQL Managed Instance to deploy with extra replicas in a high availability configuration.
-
-For showcasing and testing SQL Managed Instance with [Always On availability groups](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-high-availability#deploy-with-always-on-availability-groups), a dedicated [Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/) is available to help you simulate failures and get hands-on experience with this deployment model.
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance stress simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ allows you to generate load on the Azure Arc-enabled SQL Managed Instance, which can be used to showcase how the SQL database and services are performing, as well as to highlight the operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you alongside the username and password you used to deploy the environment.
-
- ![Screenshot showing opened SqlQueryStress](./41.png)
-
- ![Screenshot showing SQLMI Endpoints text file](./42.png)
-
-> **NOTE: The secondary SQL Managed Instance endpoint will be available only when using the [HA deployment model ("Business Critical")](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/).**
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![Screenshot showing SqlQueryStress connected](./43.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the procedure below and change the number of iterations you want it to run, as well as the number of threads, to generate even more load on the database. In addition, set the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads, and a 1ms delay between queries. These settings should allow the stress test to run for a while.
-
- ![Screenshot showing SqlQueryStress settings](./44.png)
-
- ![Screenshot showing SqlQueryStress running](./45.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure and SQL Managed Instance monitoring (PostgreSQL dashboards are included as well, but we will not be covering them in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is shown in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Screenshot showing Grafana desktop shortcut](./46.png)
-
-- [Optional] The IP address for this instance represents the Kubernetes _LoadBalancer_ external IP that was provisioned as part of Azure Arc-enabled data services. Use the _`kubectl get svc -n arc`_ command to view the _metricsui_ external service IP address.
-
- ![Screenshot showing metricsui Kubernetes service](./47.png)
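-
-  As a sketch, assuming the default service naming used by Azure Arc-enabled data services, the external IP can also be extracted directly from the command line:
-
-  ```shell
-  # List all data services endpoints in the "arc" namespace
-  kubectl get svc -n arc
-
-  # Print only the external IP of the metricsui (Grafana) LoadBalancer service
-  kubectl get svc metricsui-external-svc -n arc -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
-  ```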
-
-- To log in, use the same username and password that are in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Screenshot showing Grafana username and password](./48.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Screenshot showing Grafana dashboards](./49.png)
-
- ![Screenshot showing Grafana "SQL Managed Instance Metrics" dashboard](./50.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _`SqlQueryStress`_ (in case it has already finished).
-
- ![Screenshot showing "Last 5 minutes" time range](./51.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Screenshot showing increased load activity](./52.png)
-
- ![Screenshot showing increased load activity](./53.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./54.png)
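-
-  The same cleanup can also be done from the Azure CLI (a sketch, assuming the resource group name you used at deployment time):
-
-  ```shell
-  # Delete the resource group and everything in it without waiting for completion
-  az group delete --name <resource-group-name> --yes --no-wait
-  ```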
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/_index.md
deleted file mode 100644
index b551fc1572..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/_index.md
+++ /dev/null
@@ -1,298 +0,0 @@
----
-type: docs
-title: "PostgreSQL ARM Template"
-linkTitle: "PostgreSQL ARM Template"
-weight: 3
-description: >
----
-
-## Deploy Azure PostgreSQL in directly connected mode on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [PostgreSQL](https://docs.microsoft.com/azure/azure-arc/data/what-is-azure-arc-enabled-postgres-hyperscale) deployed on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an Azure Arc Data Controller, PostgreSQL instance, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services:
-
-![Screenshot showing the deployed architecture](./diagram.png)
-
-> **NOTE: Currently, Azure Arc-enabled PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
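-
-  For example, a minimal way to print it from PowerShell (assuming the default key path):
-
-  ```shell
-  type $env:USERPROFILE\.ssh\id_rsa.pub
-  ```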
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
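-
-  A sketch of one way to avoid the secret being regenerated (not part of the scenario automation) is to create the service principal a single time and then add the remaining roles to it with _az role assignment create_:
-
-  ```shell
-  # Create the service principal once; the JSON output contains the client secret (password)
-  az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
-
-  # Add the remaining roles to the same service principal without recreating it,
-  # replacing <appId> with the "appId" value from the output above
-  az role assignment create --assignee <appId> --role "Security admin" --scope /subscriptions/$subscriptionId
-  az role assignment create --assignee <appId> --role "Security reader" --scope /subscriptions/$subscriptionId
-  az role assignment create --assignee <appId> --role "Monitoring Metrics Publisher" --scope /subscriptions/$subscriptionId
-  ```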
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the AKS cluster, including the data controller and PostgreSQL.
-
-- In addition to deploying the data controller and PostgreSQL, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment uses the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json), and an illustrative sketch follows the parameter list below.
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this scenario we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, for this scenario we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this Azure Arc-enabled PostgreSQL scenario we will set it to _**true**_.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
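-  For illustration only, a filled-in parameters file for this PostgreSQL scenario might look like the following sketch (all values are placeholders, not working credentials):
-
-  ```json
-  {
-    "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
-    "contentVersion": "1.0.0.0",
-    "parameters": {
-      "sshRSAPublicKey": { "value": "ssh-rsa AAAA...= user@pc" },
-      "spnClientId": { "value": "00000000-0000-0000-0000-000000000000" },
-      "spnClientSecret": { "value": "<your service principal secret>" },
-      "spnTenantId": { "value": "00000000-0000-0000-0000-000000000000" },
-      "windowsAdminUsername": { "value": "arcdemo" },
-      "windowsAdminPassword": { "value": "<12-123 character password>" },
-      "logAnalyticsWorkspaceName": { "value": "arcdatalogworkspace" },
-      "deploySQLMI": { "value": false },
-      "SQLMIHA": { "value": false },
-      "deployPostgreSQL": { "value": true },
-      "deployBastion": { "value": false },
-      "bastionHostName": { "value": "Arc-Data-Demo-Bastion" }
-    }
-  }
-  ```
-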
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <location>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
-  --parameters <path to your azuredeploy.parameters.json>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
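-
-  A quick way to check a VM size's regional availability and restrictions before re-deploying (a sketch using the Azure CLI; adjust the size to match the one in the error message):
-
-  ```shell
-  # List regions offering Standard_D8s_v3, including any capacity restrictions
-  az vm list-skus --size Standard_D8s_v3 --output table
-  ```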
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 various Azure resources** deployed (if you chose to deploy Azure Bastion, you will have **9 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
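-If you prefer the CLI over the portal, a rule like the following sketch achieves the same result (replace the resource group name and source IP placeholders with your own values):
-
-```shell
-# Allow inbound RDP (TCP 3389) from your public IP only
-az network nsg rule create \
-  --resource-group <resource-group-name> \
-  --nsg-name Arc-Data-Client-NSG \
-  --name AllowRDP \
-  --priority 1001 \
-  --direction Inbound \
-  --access Allow \
-  --protocol Tcp \
-  --source-address-prefixes <your-public-ip> \
-  --destination-port-ranges 3389
-```
-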
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and PostgreSQL will be deployed on the cluster, ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the post-run desktop](./33.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and PostgreSQL instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **12 various Azure resources deployed**). The important ones to notice are:
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- - _Azure Arc-enabled PostgreSQL_ - The PostgreSQL instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./35.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./35.png)
-
- ![Screenshot showing Azure Data Studio extensions](./36.png)
-
-- Additionally, the PostgreSQL connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
-  ![Screenshot showing Azure Data Studio PostgreSQL connection](./37.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./38.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./39.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./40.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aro/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aro/_index.md
deleted file mode 100644
index 4da6491754..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aro/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Red Hat OpenShift"
-linkTitle: "Azure Red Hat OpenShift"
-weight: 3
-description: >-
-  If you do not yet have an Azure Red Hat OpenShift cluster, the scenarios in this section will guide you on creating an ARO cluster with Azure Arc-enabled data services integration in an automated fashion using ARM templates.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_dc_vanilla_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_dc_vanilla_arm_template/_index.md
deleted file mode 100644
index 7ac0ed5f8e..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_dc_vanilla_arm_template/_index.md
+++ /dev/null
@@ -1,302 +0,0 @@
----
-type: docs
-title: "Data Controller ARM Template"
-linkTitle: "Data Controller ARM Template"
-weight: 1
-description: >
----
-
-## Deploy a vanilla Azure Arc Data Controller in a directly connected mode on ARO using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) deployed on [Azure Red Hat OpenShift (ARO)](https://docs.microsoft.com/azure/openshift/intro-openshift) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an ARO cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc Data Services.
-
-> **NOTE: Currently, Azure Arc-enabled PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Check your subscription quota for the DSv3 family.
-
-  > **NOTE: Azure Red Hat OpenShift requires a [minimum of 40 cores](https://learn.microsoft.com/azure/openshift/tutorial-create-cluster#before-you-begin) to create and run an OpenShift cluster.**
-
- ```shell
- LOCATION=eastus
- az vm list-usage -l $LOCATION --query "[?contains(name.value, 'standardDSv3Family')]" -o table
- ```
-
- ![Screenshot of checking DSV3 family cores usage](./01.png)
-
-- Get the Azure Red Hat OpenShift resource provider Id, which needs to be assigned the “Contributor” role.
-
- ```shell
- az ad sp list --filter "displayname eq 'Azure Red Hat OpenShift RP'" --query "[?appDisplayName=='Azure Red Hat OpenShift RP'].{name: appDisplayName, objectId: id}"
- ```
-
- ![Screenshot of Azure resource provider for Aro](./02.png)
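-
-  With the _objectId_ returned above, such an assignment could be created as follows (a sketch, not part of the scenario automation; _$subscriptionId_ is the variable set in the service principal step):
-
-  ```shell
-  # Assign the "Contributor" role to the ARO resource provider service principal
-  az role assignment create --assignee <objectId> --role "Contributor" --scope /subscriptions/$subscriptionId
-  ```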
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aro_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/aro.json) - Deploys the ARO cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the ARO cluster, including the data controller.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment uses the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this data controller vanilla scenario we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, for this data controller vanilla scenario we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this data controller vanilla scenario we leave it set to _**false**_.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
- - _`AroProviderId`_ - ARO resource provider Id.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <location>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json \
-  --parameters <path to your azuredeploy.parameters.json>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~40-60min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 various Azure resources** deployed.
-
- ![Screenshot showing ARM template deployment completed](./03.png)
-
- ![Screenshot showing new Azure resource group with all resources](./04.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_dc_vanilla_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_dc_vanilla_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_dc_vanilla_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./05.png)
-
- ![Screenshot showing adding a new inbound security rule](./06.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./07.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./08.png)
-
- ![Screenshot showing connecting to the VM using RDP](./09.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./10.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./11.png)
-
- ![Screenshot showing connecting to the VM using JIT](./12.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster, ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the post-run desktop](./32.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **11 various Azure resources deployed**). The important ones to notice are:
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./33.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./34.png)
-
- ![Screenshot showing Azure Data Studio extensions](./35.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./36.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./37.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./38.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_mssql_mi_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_mssql_mi_arm_template/_index.md
deleted file mode 100644
index 7c58c42ef4..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_mssql_mi_arm_template/_index.md
+++ /dev/null
@@ -1,384 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance ARM Template"
-linkTitle: "SQL Managed Instance ARM Template"
-weight: 2
-description: >
----
-
-## Deploy Azure SQL Managed Instance in directly connected mode on ARO using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on [Azure Red Hat OpenShift (ARO)](https://docs.microsoft.com/azure/openshift/intro-openshift) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an ARO cluster deployed with an Azure Arc Data Controller, SQL Managed Instance, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Check your subscription quota for the DSv3 family.
-
-  > **NOTE: Azure Red Hat OpenShift requires a [minimum of 40 cores](https://learn.microsoft.com/azure/openshift/tutorial-create-cluster#before-you-begin) to create and run an OpenShift cluster.**
-
- ```shell
- LOCATION=eastus
- az vm list-usage -l $LOCATION --query "[?contains(name.value, 'standardDSv3Family')]" -o table
- ```
-
- ![Screenshot of checking DSV3 family cores usage](./01.png)
-
-- Get the Azure Red Hat OpenShift resource provider Id, which needs to be assigned the “Contributor” role.
-
- ```shell
- az ad sp list --filter "displayname eq 'Azure Red Hat OpenShift RP'" --query "[?appDisplayName=='Azure Red Hat OpenShift RP'].{name: appDisplayName, objectId: id}"
- ```
-
- ![Screenshot of Azure resource provider for Aro](./02.png)
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
-  - [_aro_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/aro.json) - Deploys the ARO cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the ARO cluster, including the data controller and SQL Managed Instance.
-
-- In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment uses the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this Azure Arc-enabled SQL Managed Instance scenario we will set it to _**true**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, set this to either _**true**_ or _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this scenario we leave it set to _**false**_.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
- - _`AroProviderId`_ - ARO resource provider Id
-
-  > **Note:** In case you decided to deploy SQL Managed Instance in a highly available fashion, refer to the ["Perform database failover with SQL Managed Instance Availability Groups"](../../day2/aks/aks_mssql_ha/_index.md) Jumpstart scenario as well as the ["High Availability with Azure Arc-enabled SQL Managed Instance"](https://learn.microsoft.com/azure/azure-arc/data/managed-instance-high-availability) product documentation.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <location>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json \
-  --parameters <path to your azuredeploy.parameters.json>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 various Azure resources** deployed (If you chose to deploy Azure Bastion, you will have **9 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./03.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./04.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_mssql_mi_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_mssql_mi_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_mssql_mi_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG NSG with blocked RDP](./05.png)
-
- ![Screenshot showing adding a new inbound security rule](./06.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./07.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./08.png)
-
- ![Screenshot showing connecting to the VM using RDP](./09.png)
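-
-- [Optional] Instead of using the portal, the same rule can be created with the Azure CLI. The below is a minimal sketch that assumes the _Arc-Data-Demo_ resource group name used in the deployment example above; replace the source address prefix placeholder with your own public IP address.
-
-  ```shell
-  # Allow inbound RDP (3389) from your public IP only - values below are placeholders
-  az network nsg rule create \
-  --resource-group Arc-Data-Demo \
-  --nsg-name Arc-Data-Client-NSG \
-  --name AllowRDP \
-  --priority 1001 \
-  --direction Inbound \
-  --access Allow \
-  --protocol Tcp \
-  --source-address-prefixes <your public IP address> \
-  --destination-port-ranges 3389
-  ```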
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./10.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./11.png)
-
- ![Screenshot showing connecting to the VM using JIT](./12.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and SQL Managed Instance will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the PowerShell logon script run](./33.png)
-
- ![Screenshot showing the PowerShell logon script run](./34.png)
-
- ![Screenshot showing the post-run desktop](./35.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **12 various Azure resources deployed**). The important ones to notice are:
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions) as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
-  - _Custom location_ - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- - _Azure Arc-enabled SQL Managed Instance_ - The SQL Managed Instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./36.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./37.png)
-
- ![Screenshot showing Azure Data Studio extensions](./38.png)
-
-- Additionally, the SQL Managed Instance connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio SQL MI connection](./39.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./40.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./41.png)
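-
-You can also list the installed extensions from the command line; a sketch using the Azure CLI _k8s-extension_ extension (the cluster name below is a placeholder for the Azure Arc-enabled Kubernetes resource name in your resource group):
-
-  ```shell
-  # Requires the k8s-extension Azure CLI extension; names below are placeholders
-  az k8s-extension list \
-  --resource-group Arc-Data-Demo \
-  --cluster-name <your Azure Arc-enabled Kubernetes cluster name> \
-  --cluster-type connectedClusters \
-  --output table
-  ```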
-
-## High Availability with SQL Always On availability groups
-
-Azure Arc-enabled SQL Managed Instance is deployed on Kubernetes as a containerized application and uses Kubernetes constructs such as stateful sets and persistent storage to provide built-in health monitoring, failure detection, and failover mechanisms to maintain service health. For increased reliability, you can also configure Azure Arc-enabled SQL Managed Instance to deploy with extra replicas in a high availability configuration.
-
-For showcasing and testing SQL Managed Instance with [Always On availability groups](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-high-availability#deploy-with-always-on-availability-groups), a dedicated [Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/) is available to help you simulate failures and get hands-on experience with this deployment model.
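-
-As an illustration only (this step is not part of this scenario's automation, and the instance name and namespace below are placeholders), deploying an instance with multiple replicas using the _arcdata_ Azure CLI extension could look like the below sketch:
-
-  ```shell
-  # Illustrative sketch - creates a Business Critical instance with three replicas
-  az sql mi-arc create \
-  --name jumpstart-sql-bc \
-  --k8s-namespace arc \
-  --tier BusinessCritical \
-  --replicas 3 \
-  --use-k8s
-  ```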
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance stress simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ allows you to generate load on the Azure Arc-enabled SQL Managed Instance, which can be used to showcase how the SQL database and services are performing as well as to highlight the operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you alongside the username and password you used to deploy the environment.
-
- ![Screenshot showing opened SqlQueryStress](./42.png)
-
- ![Screenshot showing SQLMI Endpoints text file](./43.png)
-
-> **NOTE: The secondary SQL Managed Instance endpoint will be available only when using the [HA deployment model ("Business Critical")](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/).**
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![Screenshot showing SqlQueryStress connected](./44.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the below procedure and change the number of iterations you want it to run as well as the number of threads to generate even more load on the database. In addition, change the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads, and a 1ms delay between queries. These configurations should keep the stress test running for a while.
-
- ![Screenshot showing SqlQueryStress settings](./45.png)
-
- ![Screenshot showing SqlQueryStress running](./46.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure and SQL Managed Instance monitoring (PostgreSQL dashboards are included as well, but we will not be covering these in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is reflected in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Screenshot showing Grafana desktop shortcut](./47.png)
-
-- [Optional] The IP address for this instance represents the Kubernetes _LoadBalancer_ external IP that was provisioned as part of Azure Arc-enabled data services. Use the _`kubectl get svc -n arc`_ command to view the _metricsui_ external service IP address.
-
- ![Screenshot showing metricsui Kubernetes service](./48.png)
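-
-  For reference, a sketch of the command with illustrative output (service names, ports, and IP addresses will differ in your deployment):
-
-  ```shell
-  kubectl get svc -n arc
-  # NAME                     TYPE           CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
-  # metricsui-external-svc   LoadBalancer   10.0.176.10   20.84.12.34   443:30777/TCP   1h
-  ```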
-
-- To log in, use the same username and password found in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Screenshot showing Grafana username and password](./49.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Screenshot showing Grafana dashboards](./50.png)
-
- ![Screenshot showing Grafana "SQL Managed Instance Metrics" dashboard](./51.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _`SqlQueryStress`_ (in case it has already finished).
-
- ![Screenshot showing "Last 5 minutes" time range](./52.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Screenshot showing increased load activity](./53.png)
-
- ![Screenshot showing increased load activity](./54.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./55.png)
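-
-  The same can be done from the Azure CLI; for example, using the resource group name from this guide:
-
-  ```shell
-  az group delete --name Arc-Data-Demo --yes --no-wait
-  ```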
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_postgresql_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_postgresql_arm_template/_index.md
deleted file mode 100644
index a8a309b48e..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/aro/aro_postgresql_arm_template/_index.md
+++ /dev/null
@@ -1,322 +0,0 @@
----
-type: docs
-title: "PostgreSQL ARM Template"
-linkTitle: "PostgreSQL ARM Template"
-weight: 3
-description: >
----
-
-## Deploy Azure PostgreSQL in directly connected mode on ARO using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [PostgreSQL](https://docs.microsoft.com/azure/azure-arc/data/what-is-azure-arc-enabled-postgres-hyperscale) deployed on an [Azure Red Hat OpenShift (ARO)](https://docs.microsoft.com/azure/openshift/intro-openshift) cluster using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an ARO cluster deployed with an Azure Arc Data Controller, PostgreSQL instance, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
-  - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
-  - "Security reader" - Required for being able to view Azure Arc-enabled Kubernetes Cloud Defender extension findings
-  - "Monitoring Metrics Publisher" - Required for Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
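-
-  For example, a sketch of scoping the "Contributor" assignment to a single, pre-created resource group instead of the whole subscription (the resource group name here is a placeholder):
-
-  ```shell
-  # Scoped role assignment - narrower than the subscription-wide examples above
-  az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-Data-Demo
-  ```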
-
-- Check your subscription quota for the DSv3 family.
-
-  > **NOTE: Azure Red Hat OpenShift requires a [minimum of 40 cores](https://learn.microsoft.com/azure/openshift/tutorial-create-cluster#before-you-begin) to create and run an OpenShift cluster.**
-
- ```shell
- LOCATION=eastus
- az vm list-usage -l $LOCATION --query "[?contains(name.value, 'standardDSv3Family')]" -o table
- ```
-
- ![Screenshot of checking DSV3 family cores usage](./01.png)
-
-- Get the Azure Red Hat OpenShift resource provider Id, which needs to be assigned the "Contributor" role.
-
- ```shell
- az ad sp list --filter "displayname eq 'Azure Red Hat OpenShift RP'" --query "[?appDisplayName=='Azure Red Hat OpenShift RP'].{name: appDisplayName, objectId: id}"
- ```
-
-  ![Screenshot of the Azure resource provider for ARO](./02.png)
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
-  - [_aro_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/aro.json) - Deploys the ARO cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the ARO cluster, including the data controller and PostgreSQL.
-
-- In addition to deploying the data controller and PostgreSQL, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/azuredeploy.parameters.example.json), and a minimal sketch of a filled-in file is shown after the list below.
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance; for this scenario, we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations; for this scenario, we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL; for this Azure Arc-enabled PostgreSQL scenario, we will set it to _**true**_.
- - _`deployBastion`_ - Choice (true | false) of whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
- - _`AroProviderId`_ - ARO resource provider Id.
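-
-  For orientation only, a minimal sketch of what a filled-in parameters file for this scenario may look like (all values are placeholders; validate the exact parameter names and value types against the example parameters file linked above):
-
-  ```json
-  {
-    "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
-    "contentVersion": "1.0.0.0",
-    "parameters": {
-      "sshRSAPublicKey": { "value": "<your SSH public key>" },
-      "spnClientId": { "value": "<your service principal appId>" },
-      "spnClientSecret": { "value": "<your service principal password>" },
-      "spnTenantId": { "value": "<your tenant id>" },
-      "windowsAdminUsername": { "value": "arcdemo" },
-      "windowsAdminPassword": { "value": "<your client VM password>" },
-      "logAnalyticsWorkspaceName": { "value": "<unique workspace name>" },
-      "deploySQLMI": { "value": false },
-      "SQLMIHA": { "value": false },
-      "deployPostgreSQL": { "value": true },
-      "deployBastion": { "value": false },
-      "bastionHostName": { "value": "Arc-Data-Demo-Bastion" },
-      "AroProviderId": { "value": "<ARO resource provider object id>" }
-    }
-  }
-  ```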
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM) and run the below command:
-
- ```shell
-  az group create --name <Your Resource Group Name> --location <Azure Region>
-  az deployment group create \
-  --resource-group <Your Resource Group Name> \
-  --name <The name of this deployment> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json \
-  --parameters <The *azuredeploy.parameters.json* parameters file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aro/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 various Azure resources** deployed (If you chose to deploy Azure Bastion, you will have **9 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./03.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./04.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_postgresql_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_postgresql_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aro/aro_postgresql_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./05.png)
-
- ![Screenshot showing adding a new inbound security rule](./06.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./07.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./08.png)
-
- ![Screenshot showing connecting to the VM using RDP](./09.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./10.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./11.png)
-
- ![Screenshot showing connecting to the VM using JIT](./12.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aro/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and PostgreSQL will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the PowerShell logon script run](./33.png)
-
- ![Screenshot showing the PowerShell logon script run](./34.png)
-
- ![Screenshot showing the PowerShell logon script run](./35.png)
-
- ![Screenshot showing the PowerShell logon script run](./36.png)
-
- ![Screenshot showing the PowerShell logon script run](./37.png)
-
- ![Screenshot showing the post-run desktop](./38.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and PostgreSQL instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **12 various Azure resources deployed**). The important ones to notice are:
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions) as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
-  - _Custom location_ - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- - _Azure Arc-enabled PostgreSQL_ - The PostgreSQL instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./39.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./40.png)
-
- ![Screenshot showing Azure Data Studio extensions](./41.png)
-
-- Additionally, the PostgreSQL connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
-  ![Screenshot showing Azure Data Studio PostgreSQL connection](./42.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./43.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./44.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./45.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/_index.md
deleted file mode 100644
index 239035b5a5..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Kubernetes Cluster API"
-linkTitle: "Kubernetes Cluster API"
-weight: 2
-description: >-
-  If you do not yet have a Kubernetes cluster, the scenarios in this section will guide you through deploying Azure Arc-enabled data services on Cluster API (CAPI) Kubernetes in an automated fashion using ARM templates.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_dc_vanilla_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_dc_vanilla_arm_template/_index.md
deleted file mode 100644
index 250252c2a1..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_dc_vanilla_arm_template/_index.md
+++ /dev/null
@@ -1,313 +0,0 @@
----
-type: docs
-title: "Data Controller ARM Template"
-linkTitle: "Data Controller ARM Template"
-weight: 1
-description: >
----
-
-## Deploy a vanilla Azure Arc Data Controller in directly connected mode on Cluster API Kubernetes cluster with Azure provider using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) deployed on a [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/introduction.html) Kubernetes cluster and its [Cluster API Azure provider (CAPZ)](https://cloudblogs.microsoft.com/opensource/2020/12/15/introducing-cluster-api-provider-azure-capz-kubernetes-cluster-management/).
-
-By the end of this scenario, you will have a CAPI Kubernetes cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
-  - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
-  - "Security reader" - Required for being able to view Azure Arc-enabled Kubernetes Cloud Defender extension findings
-  - "Monitoring Metrics Publisher" - Required for Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the Cluster API Book docs:
-
-"Cluster API requires an existing Kubernetes cluster accessible via kubectl; during the installation process the Kubernetes cluster will be transformed into a management cluster by installing the Cluster API provider components, so it is recommended to keep it separated from any application workload."
-
-In this scenario, as part of the automation flow (described below), a [Rancher K3s](https://rancher.com/docs/k3s/latest/en/) cluster will be deployed and used as the management cluster. This cluster will then be used to deploy the workload cluster using the Cluster API Azure provider (CAPZ).
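-
-For context only (the scenario automation performs this step for you via the _installCAPI_ shell script described below), turning an existing cluster into a CAPI management cluster with the Azure provider typically looks like the below _clusterctl_ sketch:
-
-  ```shell
-  # Illustrative only - the scenario's installCAPI.sh automates this on the K3s VM
-  clusterctl init --infrastructure azure
-  ```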
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (1-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use it as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
-  - [_ubuntuCapi_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/ubuntuCapi.json) - Deploys an Ubuntu Linux VM which will have Rancher K3s installed and transformed into a Cluster API management cluster via the Azure CAPZ provider. As part of its automation and the [_installCAPI_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/installCAPI.sh) shell script, a new Azure Arc-enabled Kubernetes cluster will already be created to be used by the rest of the Azure Arc-enabled data services automation. Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions) as well as for using Azure Arc [Custom location](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the CAPI workload cluster, including the data controller.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance; for this data controller vanilla scenario, we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations; for this data controller vanilla scenario, we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL; for this data controller vanilla scenario, we leave it set to _**false**_.
- - _`deployBastion`_ - Choice (true | false) of whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- You will also need to get the Azure Custom Location Resource Provider (RP) Object ID (OID) and export it as an environment variable. This is required to enable [Custom Location](https://learn.microsoft.com/azure/azure-arc/platform/conceptual-custom-locations) on your cluster.
-
-  > **NOTE: You need permissions to list all the service principals.**
-
-  #### Option 1: Bash
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM) and run the below command:
-
- ```shell
-  az group create --name <Your Resource Group Name> --location <Azure Region>
-  az deployment group create \
-  --resource-group <Your Resource Group Name> \
-  --name <The name of this deployment> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
-  --parameters <The *azuredeploy.parameters.json* parameters file location> \
-  --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. As mentioned, a new Azure Arc-enabled Kubernetes cluster resource will already be available at this point.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./03.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_dc_vanilla_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_dc_vanilla_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_dc_vanilla_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./04.png)
-
- ![Screenshot showing adding a new inbound security rule](./05.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./06.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./07.png)
-
- ![Screenshot showing connecting to the VM using RDP](./08.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./09.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./10.png)
-
- ![Screenshot showing connecting to the VM using JIT](./11.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the post-run desktop](./26.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group. The important ones to notice are:
-
-  - Custom location - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - Azure Arc Data Controller - The data controller that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./27.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./28.png)
-
- ![Screenshot showing Azure Data Studio extensions](./29.png)
-
-## Cluster extensions
-
-In this scenario, four Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _microsoft.policyinsights_ - The Azure Policy cluster extension. To learn more about it, read the [Understand Azure Policy for Kubernetes clusters](https://docs.microsoft.com/azure/governance/policy/concepts/policy-for-kubernetes) Azure doc.
-
-- _microsoft.azuredefender.kubernetes_ - The Microsoft Defender for Cloud cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Defender with Cluster API as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/cluster_api/cluster_api_defender_extension/#create-azure-defender-extensions-instance) scenario.
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-- In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./30.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./31.png)
-
-### Exploring logs from the Client virtual machine
-
-Occasionally, you may need to review log output from scripts that run on the _Arc-Data-Client_ or _Arc-Data-CAPI-MGMT_ virtual machines in case of deployment failures. To make troubleshooting easier, the scenario deployment scripts collect all relevant logs in the _C:\Temp_ folder on _Arc-Data-Client_. A short description of the logs and their purpose can be seen in the list below:
-
-| Logfile | Description |
-| ------- | ----------- |
-| _C:\Temp\Bootstrap.log_ | Output from the initial bootstrapping script that runs on _Arc-Data-Client_. |
-| _C:\Temp\DataServicesLogonScript.log_ | Output of _DataServicesLogonScript.ps1_ which configures Azure Arc-enabled data services baseline capability. |
-| _C:\Temp\installCAPI.log_ | Output from the custom script extension which runs on _Arc-Data-CAPI-MGMT_ and configures the Cluster API for Azure cluster and onboards it as an Azure Arc-enabled Kubernetes cluster. If you encounter ARM deployment issues with _ubuntuCapi.json_ then review this log. |
-
-![Screenshot showing the Temp folder with deployment logs](./32.png)
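-
-If a deployment step fails, one way to follow a log in near real time from the Client VM is a PowerShell one-liner such as the below (shown for the logon script log; substitute any file from the table above):
-
-  ```powershell
-  # Stream the last 50 lines of the logon script log and keep following new output
-  Get-Content -Path "C:\Temp\DataServicesLogonScript.log" -Tail 50 -Wait
-  ```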
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./33.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_mssql_mi_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_mssql_mi_arm_template/_index.md
deleted file mode 100644
index 730d9a13ca..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_mssql_mi_arm_template/_index.md
+++ /dev/null
@@ -1,397 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance ARM Template"
-linkTitle: "SQL Managed Instance ARM Template"
-weight: 2
-description: >
----
-
-## Deploy Azure Arc-enabled SQL Managed Instance in directly connected mode on Cluster API Kubernetes cluster with Azure provider using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on a [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/introduction.html) Kubernetes cluster and its [Cluster API Azure provider (CAPZ)](https://cloudblogs.microsoft.com/opensource/2020/12/15/introducing-cluster-api-provider-azure-capz-kubernetes-cluster-management/).
-
-By the end of this scenario, you will have a CAPI Kubernetes cluster deployed with an Azure Arc Data Controller, SQL Managed Instance (with a sample database), and a Microsoft Windows Server 2022 (Datacenter) Azure sidecar VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
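-
- For reference, a minimal sketch of a more scoped assignment, limiting the "Contributor" role to a single resource group (the resource group name is a placeholder):
-
- ```shell
- # Scope the role assignment to one resource group instead of the whole subscription
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId/resourceGroups/<resource group name>
- ```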
-
-## Architecture (In a nutshell)
-
-From the Cluster API Book docs:
-
-"Cluster API requires an existing Kubernetes cluster accessible via kubectl; during the installation process the Kubernetes cluster will be transformed into a management cluster by installing the Cluster API provider components, so it is recommended to keep it separated from any application workload."
-
-In this scenario, and as part of the automation flow (described below), a [Rancher K3s](https://rancher.com/docs/k3s/latest/en/) cluster will be deployed and used as the management cluster. This cluster will then be used to deploy the workload cluster using the Cluster API Azure provider (CAPZ).
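-
-For illustration, a minimal sketch of the kind of command used for that transformation, assuming _clusterctl_ is installed and the CAPZ-related environment variables (such as the Azure subscription and service principal details) are already exported:
-
-```shell
-# Turn the existing K3s cluster pointed to by kubectl into a CAPI management cluster
-# by installing the core Cluster API components and the Azure (CAPZ) provider
-clusterctl init --infrastructure azure
-```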
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use it as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_ubuntuCapi_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/ubuntuCapi.json) - Deploys an Ubuntu Linux VM which will have Rancher K3s installed and transformed into a Cluster API management cluster via the Azure CAPZ provider. As part of its automation and the [_installCAPI_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/installCAPI.sh) shell script, a new Azure Arc-enabled Kubernetes cluster will already be created to be used by the rest of the Azure Arc-enabled data services automation. Azure Arc-enabled data services deployed in directly connected mode use this type of resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions) as well as for using an Azure Arc [Custom location](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the CAPI workload cluster, including the data controller.
-
-- In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder.
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this Azure Arc-enabled SQL Managed Instance scenario, set it to _**true**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. Set this to either _**true**_ or _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this scenario, leave it set to _**false**_.
- - _`deployBastion`_ - Choice (true | false) of whether to deploy Azure Bastion for connecting to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- You will also need to get the Azure Custom Location Resource Provider (RP) Object ID (OID) and export it as an environment variable. This is required to enable [Custom Location](https://learn.microsoft.com/azure/azure-arc/platform/conceptual-custom-locations) on your cluster.
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json file location> \
- --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
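-
- To check whether a given VM size is currently available in a region before re-running the deployment, you can use the Azure CLI (the region and size below are examples only):
-
- ```shell
- # Show SKU availability and any restrictions for the VM size in the region
- az vm list-skus --location eastus --size Standard_D8s_v3 --output table
- ```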
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. As mentioned, a new Azure Arc-enabled Kubernetes cluster resource will already be available at this point.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./03.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to the _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_mssql_mi_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_mssql_mi_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_mssql_mi_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
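-
-If you prefer the Azure CLI over the portal steps below, a minimal sketch of an equivalent rule (your public IP and resource group name are placeholders):
-
-```shell
-# Allow inbound RDP (3389) from your public IP only
-az network nsg rule create \
-  --resource-group <resource group name> \
-  --nsg-name Arc-Data-Client-NSG \
-  --name AllowRDP \
-  --priority 1001 \
-  --direction Inbound \
-  --access Allow \
-  --protocol Tcp \
-  --source-address-prefixes <your public IP> \
-  --destination-port-ranges 3389
-```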
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./04.png)
-
- ![Screenshot showing adding a new inbound security rule](./05.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./06.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./07.png)
-
- ![Screenshot showing connecting to the VM using RDP](./08.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./09.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./10.png)
-
- ![Screenshot showing connecting to the VM using JIT](./11.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and the SQL Managed Instance will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the post-run desktop](./29.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - Custom location - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - Azure Arc Data Controller - The data controller that is now deployed on the Kubernetes cluster.
-
- - Azure Arc-enabled SQL Managed Instance - The SQL Managed Instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./30.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./31.png)
-
- ![Screenshot showing Azure Data Studio extensions](./32.png)
-
-- Additionally, the SQL Managed Instance connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio SQL MI connection](./33.png)
-
-## Cluster extensions
-
-In this scenario, four Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _microsoft.policyinsights_ - The Azure Policy cluster extension. To learn more about it, read the [Understand Azure Policy for Kubernetes clusters](https://docs.microsoft.com/azure/governance/policy/concepts/policy-for-kubernetes) Azure doc.
-
-- _microsoft.azuredefender.kubernetes_ - The Microsoft Defender for Cloud cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Defender with Cluster API as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/cluster_api/cluster_api_defender_extension/#create-azure-defender-extensions-instance) scenario.
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-- To view these cluster extensions, click the Extensions settings of the Azure Arc-enabled Kubernetes cluster resource.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./34.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./35.png)
-
-## High Availability with SQL Always-On availability groups
-
-Azure Arc-enabled SQL Managed Instance is deployed on Kubernetes as a containerized application and uses Kubernetes constructs such as stateful sets and persistent storage to provide built-in health monitoring, failure detection, and failover mechanisms to maintain service health. For increased reliability, you can also configure Azure Arc-enabled SQL Managed Instance to deploy with extra replicas in a high availability configuration.
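-
-A quick way to inspect the deployed instance and its replica count directly on the cluster is shown below; the _sqlmi_ short resource name is an assumption based on the Azure Arc data services custom resource definitions:
-
-```shell
-# Show the SQL Managed Instance custom resource, including its status and replicas
-kubectl get sqlmi -n arc
-```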
-
-For showcasing and testing SQL Managed Instance with [Always On availability groups](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-high-availability#deploy-with-always-on-availability-groups), a dedicated [Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/) is available to help you simulate failures and get hands-on experience with this deployment model.
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance stress simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ will allow you to generate load on the Azure Arc-enabled SQL Managed Instance that can be used to showcase how the SQL database and services are performing, as well as to highlight operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you alongside the username and password you used to deploy the environment.
-
- ![Screenshot showing opened SqlQueryStress](./36.png)
-
- ![Screenshot showing SQLMI Endpoints text file](./37.png)
-
-> **NOTE: Secondary SQL Managed Instance endpoint will be available only when using the [HA deployment model ("Business Critical")](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/).**
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![Screenshot showing SqlQueryStress connected](./38.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the below procedure and change the number of iterations you want it to run, as well as the number of threads, to generate even more load on the database. In addition, change the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads per iteration, and a 1ms delay between queries. These configurations should allow you to have the stress test running for a while.
-
- ![Screenshot showing SqlQueryStress settings](./39.png)
-
- ![Screenshot showing SqlQueryStress running](./40.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure as well as SQL Managed Instance monitoring (PostgreSQL dashboards are included as well, but we will not be covering these in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is reflected in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Screenshot showing Grafana desktop shortcut](./41.png)
-
-- [Optional] The IP address for this instance represents the Kubernetes _LoadBalancer_ external IP that was provisioned as part of Azure Arc-enabled data services. Use the _`kubectl get svc -n arc`_ command to view the _metricsui_ external service IP address (a command-line sketch follows the screenshot below).
-
- ![Screenshot showing metricsui Kubernetes service](./42.png)
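-
- For reference, a minimal sketch of retrieving that external IP directly with _kubectl_; the _metricsui-external-svc_ service name is an assumption and may differ between releases:
-
- ```shell
- # List all Azure Arc-enabled data services endpoints in the arc namespace
- kubectl get svc -n arc
-
- # Print only the LoadBalancer IP of the metrics (Grafana) service
- kubectl get svc -n arc metricsui-external-svc -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
- ```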
-
-- To log in, use the same username and password that is in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Screenshot showing Grafana username and password](./43.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Screenshot showing Grafana dashboards](./44.png)
-
- ![Screenshot showing Grafana "SQL Managed Instance Metrics" dashboard](./45.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _SqlQueryStress_ (in case it was already finished).
-
- ![Screenshot showing "Last 5 minutes" time range](./46.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Screenshot showing increased load activity](./47.png)
-
- ![Screenshot showing increased load activity](./48.png)
-
-### Exploring logs from the Client virtual machine
-
-Occasionally, you may need to review log output from scripts that run on the _Arc-Data-Client_ or _Arc-Data-CAPI-MGMT_ virtual machines in case of deployment failures. To make troubleshooting easier, the scenario deployment scripts collect all relevant logs in the _C:\Temp_ folder on _Arc-Data-Client_. A short description of the logs and their purpose can be seen in the list below:
-
-| Logfile | Description |
-| ------- | ----------- |
-| _C:\Temp\Bootstrap.log_ | Output from the initial bootstrapping script that runs on _Arc-Data-Client_. |
-| _C:\Temp\DataServicesLogonScript.log_ | Output of _DataServicesLogonScript.ps1_ which configures Azure Arc-enabled data services baseline capability. |
-| _C:\Temp\DeploySQLMI.log_ | Output of _deploySQL.ps1_ which deploys and configures SQL Managed Instance with Azure Arc. |
-| _C:\Temp\installCAPI.log_ | Output from the custom script extension which runs on _Arc-Data-CAPI-MGMT_ and configures the Cluster API for Azure cluster and onboards it as an Azure Arc-enabled Kubernetes cluster. If you encounter ARM deployment issues with _ubuntuCapi.json_ then review this log. |
-| _C:\Temp\SQLMIEndpoints.log_ | Output from _SQLMIEndpoints.ps1_ which collects the service endpoints for SQL MI and uses them to configure Azure Data Studio connection settings. |
-
-![Screenshot showing the Temp folder with deployment logs](./49.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./50.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_postgresql_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_postgresql_arm_template/_index.md
deleted file mode 100644
index 01fe7adc33..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_postgresql_arm_template/_index.md
+++ /dev/null
@@ -1,330 +0,0 @@
----
-type: docs
-title: "PostgreSQL ARM Template"
-linkTitle: "PostgreSQL ARM Template"
-weight: 3
-description: >
----
-
-## Deploy Azure Arc-enabled PostgreSQL in directly connected mode on Cluster API Kubernetes cluster with Azure provider using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [PostgreSQL](https://docs.microsoft.com/azure/azure-arc/data/what-is-azure-arc-enabled-postgres-hyperscale) deployed on a [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/introduction.html) Kubernetes cluster and its [Cluster API Azure provider (CAPZ)](https://cloudblogs.microsoft.com/opensource/2020/12/15/introducing-cluster-api-provider-azure-capz-kubernetes-cluster-management/).
-
-By the end of this scenario, you will have a CAPI Kubernetes cluster deployed with an Azure Arc Data Controller, PostgreSQL instance (with a sample database), and a Microsoft Windows Server 2022 (Datacenter) Azure sidecar VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
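-
- You can also review which roles ended up assigned to the service principal (the _appId_ placeholder is the one returned when the service principal was created):
-
- ```shell
- # List all role assignments held by the service principal
- az role assignment list --assignee <appId> --output table
- ```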
-
-## Architecture (In a nutshell)
-
-From the Cluster API Book docs:
-
-"Cluster API requires an existing Kubernetes cluster accessible via kubectl; during the installation process the Kubernetes cluster will be transformed into a management cluster by installing the Cluster API provider components, so it is recommended to keep it separated from any application workload."
-
-In this scenario, and as part of the automation flow (described below), a [Rancher K3s](https://rancher.com/docs/k3s/latest/en/) cluster will be deployed and used as the management cluster. This cluster will then be used to deploy the workload cluster using the Cluster API Azure provider (CAPZ).
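-
-For illustration, a minimal sketch of how a workload cluster is typically generated from a CAPZ-enabled management cluster; the cluster name, version, and node counts below are placeholders, not the values used by this scenario's automation:
-
-```shell
-# Render a workload cluster manifest from the Azure provider template and apply it
-clusterctl generate cluster <workload cluster name> \
-  --infrastructure azure \
-  --kubernetes-version v1.26.0 \
-  --control-plane-machine-count 1 \
-  --worker-machine-count 3 > workload-cluster.yaml
-
-kubectl apply -f workload-cluster.yaml
-```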
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use it as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_ubuntuCapi_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/ubuntuCapi.json) - Deploys an Ubuntu Linux VM which will have Rancher K3s installed and transformed into a Cluster API management cluster via the Azure CAPZ provider. As part of its automation and the [_installCAPI_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/installCAPI.sh) shell script, a new Azure Arc-enabled Kubernetes cluster will already be created to be used by the rest of the Azure Arc-enabled data services automation. Azure Arc-enabled data services deployed in directly connected mode use this type of resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions) as well as for using an Azure Arc [Custom location](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the CAPI workload cluster, including the data controller.
-
-- In addition to deploying the data controller and PostgreSQL, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder.
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this scenario, leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. For this scenario, leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this Azure Arc-enabled PostgreSQL scenario, set it to _**true**_.
- - _`deployBastion`_ - Choice (true | false) of whether to deploy Azure Bastion for connecting to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- You will also need to get the Azure Custom Location Resource Provider (RP) Object ID (OID) and export it as an environment variable. This is required to enable [Custom Location](https://learn.microsoft.com/azure/azure-arc/platform/conceptual-custom-locations) on your cluster.
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json file location> \
- --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
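-
- To pick an alternative region, you can list the Azure regions available to your subscription:
-
- ```shell
- # List all Azure regions available to your subscription
- az account list-locations --output table
- ```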
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal (you can also verify this with the CLI snippet after the screenshots below). As mentioned, a new Azure Arc-enabled Kubernetes cluster resource will already be available at this point.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./03.png)
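-
- You can also verify the provisioned resources from the command line (the resource group name below follows the earlier example):
-
- ```shell
- # List everything deployed into the scenario resource group
- az resource list --resource-group Arc-Data-Demo --output table
- ```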
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to the _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_postgresql_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_postgresql_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure_postgresql_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./04.png)
-
- ![Screenshot showing adding a new inbound security rule](./05.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./06.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./07.png)
-
- ![Screenshot showing connecting to the VM using RDP](./08.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./09.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./10.png)
-
- ![Screenshot showing connecting to the VM using JIT](./11.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/cluster_api/capi_azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and PostgreSQL will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the post-run desktop](./30.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - Custom location - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - Azure Arc Data Controller - The data controller that is now deployed on the Kubernetes cluster.
-
- - Azure Arc-enabled PostgreSQL - The PostgreSQL instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./31.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./32.png)
-
- ![Screenshot showing Azure Data Studio extensions](./33.png)
-
-- Additionally, the PostgreSQL connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio PostgreSQL connection](./34.png)
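-
- If you prefer the command line, a quick way to check the PostgreSQL instance on the cluster is shown below; the _postgresqls_ resource name is an assumption based on the Azure Arc data services custom resource definitions:
-
- ```shell
- # Show the Azure Arc-enabled PostgreSQL custom resource and its status
- kubectl get postgresqls -n arc
- ```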
-
-## Cluster extensions
-
-In this scenario, four Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _microsoft.policyinsights_ - The Azure Policy cluster extension. To learn more about it, read the [Understand Azure Policy for Kubernetes clusters](https://docs.microsoft.com/azure/governance/policy/concepts/policy-for-kubernetes) Azure doc.
-
-- _microsoft.azuredefender.kubernetes_ - The Microsoft Defender for Cloud cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Defender with Cluster API as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/cluster_api/cluster_api_defender_extension/#create-azure-defender-extensions-instance) scenario.
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-- To view these cluster extensions, click the Extensions settings of the Azure Arc-enabled Kubernetes cluster resource.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./35.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./36.png)
-
-### Exploring logs from the Client virtual machine
-
-Occasionally, you may need to review log output from scripts that run on the _Arc-Data-Client_ or _Arc-Data-CAPI-MGMT_ virtual machines in case of deployment failures. To make troubleshooting easier, the scenario deployment scripts collect all relevant logs in the _C:\Temp_ folder on _Arc-Data-Client_. A short description of the logs and their purpose can be seen in the list below:
-
-| Logfile | Description |
-| ------- | ----------- |
-| _C:\Temp\Bootstrap.log_ | Output from the initial bootstrapping script that runs on _Arc-Data-Client_. |
-| _C:\Temp\DataServicesLogonScript.log_ | Output of _DataServicesLogonScript.ps1_ which configures Azure Arc-enabled data services baseline capability. |
-| _C:\Temp\DeployPostgreSQL.log_ | Output of _deployPostgreSQL.ps1_ which deploys and configures PostgreSQL with Azure Arc. |
-| _C:\Temp\installCAPI.log_ | Output from the custom script extension which runs on _Arc-Data-CAPI-MGMT_ and configures the Cluster API for Azure cluster and onboards it as an Azure Arc-enabled Kubernetes cluster. If you encounter ARM deployment issues with _ubuntuCapi.json_ then review this log. |
-
-![Screenshot showing the Temp folder with deployment logs](./37.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./38.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/_index.md
deleted file mode 100644
index 94b3864646..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Unified Operations Use Cases"
-linkTitle: "Unified Operations Use Cases"
-weight: 8
-description: >-
- Once you have your Azure Arc-enabled data services deployed, you can start to use native Azure tooling to manage it as well as explore various architecture and deployment models.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/_index.md
deleted file mode 100644
index 61c3a779c3..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/_index.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-type: docs
-title: "Azure Kubernetes Service"
-linkTitle: "Azure Kubernetes Service"
-weight: 1
-description: >-
----
\ No newline at end of file
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_arm_template_ado/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_arm_template_ado/_index.md
deleted file mode 100644
index 41fbca7ce7..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_arm_template_ado/_index.md
+++ /dev/null
@@ -1,198 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance Azure DevOps Release"
-linkTitle: "SQL Managed Instance Azure DevOps Release"
-weight: 5
-description: >
----
-
-## Deploy Azure SQL Managed Instance on AKS using Azure DevOps Release Pipeline
-
-The following Jumpstart scenario will guide you on how to use [Azure DevOps (ADO) Release pipelines](https://docs.microsoft.com/azure/devops/pipelines/release/?view=azure-devops) to deploy a "Ready to Go" environment so you can start using Azure Arc-enabled data services on an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an Azure DevOps Release pipeline to deploy an AKS cluster with an Azure Arc Data Controller ([in "Directly Connected" mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity)), Azure SQL MI with a sample database, and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc Data Services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-> **NOTE: The following scenario focuses on the Azure DevOps Release pipeline creation. Once the pipeline has been created and the environment deployment has finished, the automation flow and next steps are as [described in the main bootstrap scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/)**
-
-## Prerequisites
-
-- [Azure DevOps account](https://azure.microsoft.com/services/devops/) set up with your organization and ready for project creation.
- - (Optional) [Create new Azure DevOps organization](https://docs.microsoft.com/azure/devops/organizations/accounts/create-organization?view=azure-devops).
- - (Optional) [Create new Azure DevOps project](https://docs.microsoft.com/azure/devops/organizations/projects/create-project?view=azure-devops&tabs=preview-page).
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
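-
- If you're unsure where the key lives, the hedged snippet below prints it from either shell; the Windows path is only illustrative (replace _WINUSER_ with your actual Windows username):
-
- ```shell
- # Linux, macOS, or Git Bash on Windows:
- cat ~/.ssh/id_rsa.pub
-
- # Windows Command Prompt (hypothetical username shown):
- type C:\Users\WINUSER\.ssh\id_rsa.pub
- ```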
-
-- Create Azure service principal (SP)
-
- To be able to complete the scenario and its related automation, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required. To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
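-
- For example, a resource group-scoped variant of the command above might look like the following sketch (the resource group name _Arc-Data-Demo_ is only an illustration; use the group you plan to deploy into):
-
- ```shell
- # Scope the service principal to a single resource group instead of the whole subscription
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-Data-Demo
- ```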
-
-## Deployment
-
-In this scenario, you will create a new Release pipeline to deploy the environment ARM template for this Jumpstart scenario.
-
-- In a new or an existing ADO project, start the process of creating a new release pipeline.
-
- ![Screenshot of creating new ADO pipeline](./01.jpg)
-
- ![Screenshot of creating new ADO pipeline](./02.jpg)
-
-- To create the pipeline, we will use an empty job template and give it a name (once done, click the X button).
-
- ![Screenshot of creating new empty job template](./03.jpg)
-
- ![Screenshot of creating new empty job template](./04.jpg)
-
-- Create a new task for the stage you have just created. This task will be the one for deploying the ARM template.
-
- ![Screenshot of creating new ARM template deployment task](./05.jpg)
-
- ![Screenshot of creating new ARM template deployment task](./06.jpg)
-
-- Click on the new task to start its configuration.
-
- ![Screenshot of deployment task config](./07.jpg)
-
-- When deploying an ARM template, the Azure Resource Manager connection and subscription must be provided.
-
- ![Screenshot of Azure Resource Manager connection config](./08.jpg)
-
- > **NOTE: For a new ADO project, you will be asked to click the authorization button**
-
- ![Screenshot of Azure subscription config](./09.jpg)
-
-- Provide the Azure resource group and location where all the resources will be deployed. Make sure to validate if the service is [currently available in your Azure region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-arc).
-
- ![Screenshot of resource group and location config](./10.jpg)
-
-- As mentioned, the task will use the existing ARM template for deploying Azure Arc-enabled data services with SQL Managed Instance that is hosted in the Azure Arc Jumpstart GitHub repository.
-
- - Change the Template location to "URL of the file"
-
- - Copy the raw URLs for both the [template](https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json) and the [parameters](https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.parameters.json) json files and paste them into the proper fields.
-
- - The deployment ARM template requires you to provide parameter values. Click on the _Edit Override template parameters_ button to add the values of your parameters (a hypothetical override string is sketched below, after the list).
-
- ![Screenshot of ARM template config](./11.jpg)
-
- - _`sshRSAPublicKey`_ - Your ssh public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal password
- - _`spnTenantId`_ - Your Azure tenant ID
- - _`windowsAdminUsername`_ - Client Windows VM admin username
- - _`windowsAdminPassword`_ - Client Windows VM admin password
- - _`myIpAddress`_ - Public IP address of your network
- - _`logAnalyticsWorkspaceName`_ - Unique Log Analytics workspace name
- - _`deploySQLMI`_ - SQL Managed Instance deployment (true/false)
- - _`SQLMIHA`_ - SQL Managed Instance high-availability deployment (true/false)
- - _`deployPostgreSQL`_ - PostgreSQL deployment (true/false)
- - _`clusterName`_ - AKS cluster name
- - _`bastionHostName`_ - Azure Bastion host name
- - _`dnsPrefix`_ - AKS unique DNS prefix
- - _`kubernetesVersion`_ - AKS Kubernetes Version (See previous prerequisite)
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
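-
- For reference, the task's override field accepts _`-parameterName value`_ pairs. A purely hypothetical override string (every value below is an illustrative placeholder, not a working credential) could look like:
-
- ```shell
- -sshRSAPublicKey "ssh-rsa AAAA... user@pc" -spnClientId "<appId>" -spnClientSecret "<password>" -spnTenantId "<tenantId>" -windowsAdminUsername "arcdemo" -deploySQLMI true -SQLMIHA false -deployPostgreSQL false
- ```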
-
- ![Screenshot of ARM template parameters config](./12.jpg)
-
- ![Screenshot of ARM template parameters config](./13.jpg)
-
- ![Screenshot of ARM template parameters config](./14.jpg)
-
- ![Screenshot of ARM template parameters config](./15.jpg)
-
-- Provide a deployment name.
-
- ![Screenshot of deployment name config](./16.jpg)
-
-- Provide pipeline name and click the save button.
-
- ![Screenshot of config save](./17.jpg)
-
-- After saving the task configuration, continue to create the release pipeline.
-
- ![Screenshot of pipeline creation](./18.jpg)
-
- ![Screenshot of pipeline creation](./19.jpg)
-
- ![Screenshot of pipeline creation](./20.jpg)
-
- ![Screenshot of pipeline creation](./21.jpg)
-
-- Once done, click on the new release link. In this scenario, you will manually trigger the deployment. Once you do, click on the Logs button to see the progress.
-
- ![Screenshot of pipeline deployment](./22.jpg)
-
- ![Screenshot of pipeline deployment](./23.jpg)
-
- ![Screenshot of pipeline deployment](./24.jpg)
-
- ![Screenshot of deployment progress logs](./25.jpg)
-
- ![Screenshot of deployment progress logs](./26.jpg)
-
-- Once completed, all the deployment resources will be available in the Azure portal.
-
- > **NOTE: Deployment time of the Azure resources (AKS + Windows VM) can take ~25-30 minutes.**
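-
- If you prefer to check progress from a terminal rather than the portal, a hedged option is to list the ARM deployments in the target resource group (the name below is a placeholder for the one you configured in the pipeline task):
-
- ```shell
- # Show deployments and their provisioning state
- az deployment group list --resource-group <resource-group-name> --output table
- ```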
-
- ![Screenshot of deployment completed](./27.jpg)
-
- ![Screenshot of Azure resources](./28.jpg)
-
-- As mentioned, this scenario focuses on the Azure DevOps Release pipeline creation. At this point, now that you have the Azure resources created, continue to the next steps as [described in the main bootstrap scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/#windows-login--post-deployment).
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_dr/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_dr/_index.md
deleted file mode 100644
index d6b57bbbeb..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_dr/_index.md
+++ /dev/null
@@ -1,397 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance disaster recovery"
-linkTitle: "SQL Managed Instance disaster recovery"
-weight: 3
-description: >
----
-
-## Configure disaster recovery in Azure Arc-enabled SQL Managed Instance on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can configure [disaster recovery](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-disaster-recovery) using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this guide, you will have two Azure Kubernetes Service (AKS) clusters deployed in two separate Azure virtual networks with two Azure Arc-enabled SQL Managed Instances deployed on both clusters, disaster recovery architecture configured between the two sites, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services:
-
-![Screenshot showing the deployed architecture](./diagram.png)
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- The user edits the ARM template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/VNET.json) - Deploys three Virtual Networks: one for each of the two sites where the clusters will be located, and a third Virtual Network to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/aks.json) - Deploys the two AKS clusters in both sites (primary and secondary) where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
- - The user remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the AKS clusters, including the data controllers and SQL Managed Instances, in addition to configuring disaster recovery between the two clusters.
-
- - In addition to deploying the data controllers and SQL Managed Instances, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well on the primary cluster.
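-
- Once the logon script has finished, the client VM's _kubeconfig_ holds a context for each cluster. Assuming the automation names them _primary_ and _secondary_ (the _secondary_ name is used later in this guide; _primary_ is an assumption here), you can hop between the two sites with [kubectx](https://github.com/ahmetb/kubectx):
-
- ```shell
- # Switch kubectl between the two AKS clusters (context names assumed)
- kubectx primary
- kubectl get pods -n arc
- kubectx secondary
- kubectl get pods -n arc
- ```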
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this Azure Arc-enabled SQL Managed Instance scenario, we will set it to _**true**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, set this to either _**true**_ or _**false**_.
- - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
- > **NOTE: This scenario goes through the capability to failover one Azure Arc-enabled SQL Managed Instance to another instance on a different cluster. If you would like to learn about high availability within the same cluster, you can check our Jumpstart [SQL Managed Instance Availability Groups Failover](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/) scenario.**
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/DR/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json path>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/DR/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **11 various Azure resources** deployed (If you chose to deploy Azure Bastion, you will have **12 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_dr/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_dr/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_dr/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
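-
-While the portal steps below work fine, the same rule can also be created with a single Azure CLI command. The following is a hedged sketch; the resource group name and source IP are placeholders you must replace:
-
-```shell
-# Allow inbound RDP (TCP 3389) to the client VM, restricted to your public IP
-az network nsg rule create --resource-group <resource-group-name> --nsg-name Arc-Data-Client-NSG \
-  --name AllowRDP --priority 1009 --direction Inbound --access Allow --protocol Tcp \
-  --source-address-prefixes <your-public-ip> --destination-port-ranges 3389
-```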
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/DR/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and SQL Managed Instance will be deployed on the cluster and be ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the PowerShell logon script run](./33.png)
-
- ![Screenshot showing the PowerShell logon script run](./34.png)
-
- ![Screenshot showing the PowerShell logon script run](./35.png)
-
- ![Screenshot showing the PowerShell logon script run](./36.png)
-
- ![Screenshot showing the post-run desktop](./37.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **19 various Azure resources deployed**). The important ones to notice are:
-
- - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions) as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controllers that are now deployed on the Kubernetes clusters.
-
- - _Azure Arc-enabled SQL Managed Instance_ - The SQL Managed Instances that are now deployed on the Kubernetes clusters.
-
- ![Screenshot showing additional Azure resources in the resource group](./38.png)
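-
- A hedged way to review the same inventory from the CLI is to list everything in the scenario's resource group (the name below is a placeholder):
-
- ```shell
- # List every resource deployed into the scenario's resource group
- az resource list --resource-group <resource-group-name> --output table
- ```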
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./39.png)
-
- ![Screenshot showing Azure Data Studio extensions](./40.png)
-
-- Additionally, the SQL Managed Instances connections will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation on the primary instance.
-
- ![Screenshot showing Azure Data Studio SQL MI connection](./41.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with AKS as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/aks/aks_monitor/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./42.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./43.png)
-
-## Disaster recovery with SQL distributed availability groups
-
-Azure Arc-enabled SQL Managed Instance is deployed on Kubernetes as a containerized application and uses Kubernetes constructs such as stateful sets and persistent storage to provide built-in health monitoring, failure detection, and failover mechanisms to maintain service health. For increased reliability, you can also configure disaster recovery between multiple Azure Arc-enabled SQL Managed Instances to be able to failover to another instance on another Kubernetes cluster. Disaster recovery is supported for both General Purpose and Business Critical tiers.
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance distributed availability group validation
-
-- To be able to failover to a different cluster, a distributed availability group that spans the primary and secondary clusters is created by the automation flow. This can be validated by running the following command:
-
- ```shell
- az sql instance-failover-group-arc show --name primarycr --use-k8s --k8s-namespace arc
- ```
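-
- To check the same configuration from the secondary site, you can switch contexts and query the secondary custom resource; a sketch, assuming the context and resource names created by the automation:
-
- ```shell
- kubectx secondary
- az sql instance-failover-group-arc show --name secondarycr --use-k8s --k8s-namespace arc
- ```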
-
- ![Screenshot showing disaster recovery configuration](./44.png)
-
-- As part of the automation, the script will also create a new text file and a desktop shortcut named Endpoints that includes both the primary and the secondary SQL endpoints for both SQL instances.
-
- ![Screenshot showing the Azure Arc-enabled SQL Managed Instances endpoint URLs text file](./45.png)
-
-- Open Microsoft SQL Server Management Studio (SSMS), which is installed automatically for you as part of the bootstrap Jumpstart scenario, use the primary endpoint IP address for the primary cluster, and log in to the primary DB instance using the username and password provided in the text file mentioned above.
-
- ![Screenshot showing opening SQL Server Management Studio from the start menu](./46.png)
-
-- Use the username and password you entered when provisioning the environment and select "SQL Server Authentication". Alternatively, you can retrieve the username and password from the _`$env:AZDATA_USERNAME`_ and _`$env:AZDATA_PASSWORD`_ environment variables.
-
- ![Screenshot showing logging into the SQL Server Management Studio](./47.png)
-
-- Connect to the secondary instance as well, using the primary endpoint IP address for the secondary cluster found in the text file mentioned above.
-
- ![Screenshot showing the SQL Server Management Studio after login](./48.png)
-
- ![Screenshot showing the SQL Server Management Studio after login](./49.png)
-
- ![Screenshot showing the two Azure Arc-enabled SQL Managed Instances connected in the SQL Management Studio](./50.png)
-
-- Expand the _Always On High Availability_ node on both instances to verify that the distributed availability group is created.
-
- ![Screenshot showing the local and distributed availability groups on both instances](./51.png)
-
-- You will find the _AdventureWorks2019_ database already deployed into the primary instance (_js-sql-pr_) and automatically replicated to the secondary instance (_js-sql-dr_) as part of the distributed availability group.
-
- ![Screenshot showing adventureworks database opened on the primary instance](./52.png)
-
- > **NOTE: You will not be able to browse the _AdventureWorks2019_ database from the secondary instance since this instance is configured as a disaster recovery instance**.
-
- ![Screenshot showing adventureworks database opened on the secondary instance](./53.png)
-
-### Simulating failure on the primary site
-
-- First, to test that the DB replication is working, a simple table modification is needed. For this example, on the primary replica, run the following query to update the title of one of the rows to be _Jumpstart Administrator_:
-
- ```Sql
- USE [AdventureWorks2019]
- GO
- UPDATE [HumanResources].[Employee]
- SET [JobTitle] = 'Jumpstart Administrator'
- WHERE NationalIDNumber = 245797967
- GO
- ```
-
- ![Screenshot showing updating a record in the database](./54.png)
-
- ![Screenshot showing the updated record in the database](./55.png)
-
-- To simulate a disaster situation, navigate to the AKS cluster in the Azure portal and stop the primary cluster.
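-
- Alternatively, the cluster can be stopped from the CLI, as sketched below (the cluster and resource group names are placeholders):
-
- ```shell
- # Deallocate the primary AKS cluster to simulate a site outage
- az aks stop --name <primary-aks-cluster-name> --resource-group <resource-group-name>
- ```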
-
- ![Screenshot showing stopping the primary AKS cluster](./56.png)
-
-- Wait two minutes for the cluster to shut down, then try to refresh the connection to the primary instance; you will see that it is no longer available.
-
- ![Screenshot showing unavailable primary instance](./57.png)
-
-### Initiating a forced failover to the secondary site
-
-- On the client VM, run the following commands against the secondary instance to promote it to primary with a forced failover, incurring potential data loss.
-
- ```shell
- kubectx secondary
- az sql instance-failover-group-arc update --k8s-namespace arc --name secondarycr --use-k8s --role force-primary-allow-data-loss
- ```
-
- ![Screenshot showing the forced failover commands](./58.png)
-
-- Browse to the secondary instance in Microsoft SQL Server Management Studio (SSMS); you can see that the secondary (_js-sql-dr_) instance is now promoted to primary.
-
- ![Screenshot showing browsing to the secondary instance](./59.png)
-
-- To validate that the data you updated earlier has been replicated to the secondary instance, select the _"HumanResources.Employee"_ table and click on "Edit Top 200 Rows".
-
- ![Screenshot showing Edit Top 200 Rows](./60.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./61.png)
\ No newline at end of file
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/_index.md
deleted file mode 100644
index 4465345c9b..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/_index.md
+++ /dev/null
@@ -1,127 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance Availability Groups Failover"
-linkTitle: "SQL Managed Instance Availability Groups Failover"
-weight: 1
-description: >
----
-
-## Perform database failover with SQL Managed Instance Availability Groups
-
-The following Jumpstart scenario will guide you on how to explore and test Azure Arc-enabled SQL Managed Instance Availability Groups, simulate failures, and validate DB replication. In this scenario, you will restore a sample database, initiate a failover to force an HA event, and validate database replication across multiple SQL nodes in an availability group.
-
-> **NOTE: This guide assumes you already deployed an Azure Arc-enabled SQL Managed Instance on Azure Kubernetes Service (AKS). If you haven't, the [following bootstrap Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/) offers you a way to do so in an automated fashion. All the steps and operations described in this scenario assume you used the mentioned bootstrap Jumpstart scenario and have the Client VM deployed as part of it.**
-
-## Deployed Kubernetes Resources
-
-When deploying Azure Arc-enabled SQL Managed Instance in an availability group, multiple Kubernetes resources are created to support it. The below section describes the main ones that are important to understand for this scenario.
-
-### SQL MI Pod Replicas
-
-Three SQL pod replicas will be deployed to assemble the availability group. These can be seen using the _`kubectl get pods -n <namespace> -o wide`_ command, for example, _`kubectl get pods -n arc -o wide`_. It is also important to highlight that Kubernetes will spread the pods across the various nodes in the cluster.
-
-![SQL pods](./01.png)
-
-### Services & Endpoints
-
-An external endpoint is automatically provisioned for connecting to databases within the availability group. This endpoint plays the role of the availability group listener.
-
-In an availability group deployment, two endpoints, primary and secondary, get created, both backed by a Kubernetes Service resource of type _LoadBalancer_.
-
-- Using the _`az sql mi-arc show -n jumpstart-sql --k8s-namespace arc --use-k8s`_ command, validate the deployment endpoints details and the Availability Group health status.
-
- ![az sql Azure CLI extension](./02.png)
-
- ![az sql mi-arc show command](./03.png)
-
- > **NOTE: Initiating the command will also deploy the _az sql_ Azure CLI extension automatically.**
-
-- Using the _`kubectl get svc -n arc`_ command, you will be able to see the _LoadBalancer_ services used by the endpoints.
-
- ![Kubernetes services](./04.png)
-
-## Database Restore
-
-In order for you to test the HA functionality, a database restore _[RestoreDB](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/arm_template/artifacts/RestoreDB.ps1)_ PowerShell script is provided. The script will restore the _[AdventureWorks2019](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms)_ sample database directly onto the primary SQL node pod container. From the _C:\Temp_ folder, run the script using the _`.\RestoreDB.ps1`_ command.
-
-![RestoreDB script](./05.png)
-
-## Database Replication
-
-All databases are automatically added to the availability group, including all user databases (such as the _AdventureWorks2019_ database you just restored) and system databases like _master_ and _msdb_. This capability provides a single-system view across the availability group replicas.
-
-- In addition to restoring the _AdventureWorks2019_ database, the script will also create a new text file and a desktop shortcut named _Endpoints_ that includes both the primary and the secondary SQL endpoints.
-
- ![Endpoints desktop shortcut](./06.png)
-
- ![Endpoints text file](./07.png)
-
-- Open Microsoft SQL Server Management Studio (SSMS), which is installed automatically for you as part of the [bootstrap Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_mssql_mi_arm_template/), use the primary endpoint IP address, and log in to the primary DB instance using the username and password provided in the text file mentioned above.
-
- ![Microsoft SQL Server Management Studio](./08.png)
-
-- Use the username and password you entered when provisioning the environment and select "SQL Server Authentication". Alternatively, you can retrieve the username and password from the _`$env:AZDATA_USERNAME`_ and _`$env:AZDATA_PASSWORD`_ environment variables.
-
- ![SSMS login](./09.png)
-
- ![Primary endpoint connected](./10.png)
-
-- Follow the same process and connect to the secondary endpoint.
-
- ![Connect button](./11.png)
-
- ![Secondary endpoint connected](./12.png)
-
-- On both endpoints, expand the "Databases" and the "Always On High Availability" sections and see how the _AdventureWorks2019_ database is already automatically replicated and is part of the availability group.
-
- ![Databases replication](./13.png)
-
-- To test that the DB replication is working, a simple table modification is needed. For this example, on the primary replica, expand the "Tables" section for the database, select the _"HumanResources.Employee"_ table, click on "Edit Top 200 Rows", modify one or more records, and commit the changes by saving (_`Ctrl+S`_). As you can see, in this example a change was made to _"ken0"_'s title and the number of vacation hours for _"rob0"_.
-
- ![Expanding database for primary](./14.png)
-
- ![Edit Top 200 Rows](./15.png)
-
- ![Modifying a table](./16.png)
-
-- On the secondary replica, expand the "Tables" section for the database, click on "Select Top 1000 Rows", and in the Results pane see how the table change is now replicated, showing the synchronization of the SQL instances in the availability group works as expected.
-
- ![Expanding database for secondary](./17.png)
-
- ![Select Top 1000 Rows](./18.png)
-
- ![Replication works](./19.png)
-
-## Database Failover
-
-As you already know, the availability group includes three Kubernetes replicas, a primary and two secondaries. All CRUD operations for the availability group are managed internally, including creating the availability group and joining replicas to it.
-
-- To test failover between the replicas, we will simulate a "crash" that will trigger an HA event and force one of the secondary replicas to be promoted to a primary replica. Open two side-by-side PowerShell sessions. In the left-side session, use the _`kubectl get pods -n arc`_ command to review the deployed pods. The right-side session will be used to monitor the pods on the cluster using the _`kubectl get pods -n arc -w`_ command. As you can see, three SQL replicas with four containers each are running.
-
- ![side-by-side PowerShell sessions](./20.png)
-
-- In SSMS, you can also see that _jumpstart-sql-0_ is acting as the primary replica while _jumpstart-sql-1_ and _jumpstart-sql-2_ are the secondaries. At this point, close SSMS.
-
- ![Primary and secondary replicas](./21.png)
-
-- To trigger the HA event, delete the primary replica _jumpstart-sql-0_ using the _`kubectl delete pod jumpstart-sql-0 -n arc`_ command and watch how the pod gets deleted and then deployed again due to being part of a Kubernetes _ReplicaSet_. Wait for the _jumpstart-sql-0_ pod to become ready again (and an additional few minutes to let the availability group recover).
-
- ![Pod deletion](./22.png)
-
-- Re-open SSMS and connect back to the previous _secondary_ endpoint. You can now see that _jumpstart-sql-0_ is acting as the secondary replica and _jumpstart-sql-2_ was promoted to primary. In addition, run the _`az sql mi-arc show -n jumpstart-sql --k8s-namespace arc --use-k8s`_ command again and check the health status of the availability group.
-
- > **NOTE: It might take a few minutes for the availability group to return to a healthy state.**
-
- ![Successful failover](./23.png)
-
- ![Availability group health](./24.png)
-
-## Re-Validating Database Replication
-
-- Now that we have performed a successful failover, we can re-validate and make sure replication still works as expected. In SSMS, re-add the second instance.
-
- ![Re-adding instance](./25.png)
-
-- In the primary endpoint connection, repeat the process of performing a change on the _AdventureWorks2019_ database _"HumanResources.Employee"_ table and check that replication is working. In the example below, you can see how new values in new rows are now replicated.
-
- ![Successful replication](./26.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_mi_ad_auth_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_mi_ad_auth_arm_template/_index.md
deleted file mode 100644
index e7b0306bb5..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_mi_ad_auth_arm_template/_index.md
+++ /dev/null
@@ -1,286 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance with AD Authentication ARM Template"
-linkTitle: "SQL Managed Instance with AD Authentication ARM Template"
-weight: 2
-description: >
----
-
-## Deploy Azure SQL Managed Instance with AD authentication support using Customer-managed keytab in directly connected mode on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) with [Active Directory Authentication](https://docs.microsoft.com/azure/azure-arc/data/active-directory-introduction) support to control access to SQL Managed Instance. This scenario is deployed on an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview). This scenario uses [Customer-managed keytab (CMK)](https://docs.microsoft.com/azure/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector) to support [Active Directory authentication in Arc-enabled SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/deploy-active-directory-sql-managed-instance?tabs=customer-managed-keytab-mode).
-
-By the end of this scenario, you will have an AKS cluster deployed with an Azure Arc Data Controller, SQL Managed Instance, Microsoft Windows Server 2022 (Datacenter) Azure VM with Active Directory Domain Services and DNS server installed, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
- To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- The user edits the ARM template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/VNET.json) - Deploys a Virtual Network with two subnets, one to be used by the Client virtual machine and Active Directory Domain Services VM, and the other to be used by AKS cluster. Assigns DNS servers in the VNet when Active Directory authentication support is enabled in SQL Managed Instance.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc data services will be deployed.
- - [_addsVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/addsVm.json) - Deploys the Active Directory Domain Services Windows VM. This is where all user accounts used to access SQL Managed Instance with Active Directory authentication are created, along with the service account used to assign the service principal and generate the keytab file, and the DNS entries for domain controller and SQL Managed Instance name resolution.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
- - The user remotes into the Active Directory domain-joined client Windows VM using an Active Directory account, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the AKS cluster, including the reverse DNS setup, PTR record for the domain controller, data controller, SQL MI organizational unit (OU), domain user account and keytab file, Active Directory Connector (ADC), and SQL Managed Instance.
-
- - In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database is restored, a Windows account is created in SQL Managed Instance, and the sysadmin role is automatically assigned for you as well, so you can connect to the database server using Windows Integrated authentication from SQL Server Management Studio (SSMS) or Azure Data Studio.
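-
- After the logon script completes, a hedged sanity check from the client VM is to confirm that the Active Directory Connector and SQL Managed Instance custom resources were created (this assumes the automation used the _arc_ namespace, as in the sibling Jumpstart scenarios):
-
- ```shell
- # Verify the Active Directory Connector (ADC) and SQL MI custom resources
- kubectl get adc -n arc
- kubectl get sqlmi -n arc
- ```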
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. Use value _**true**_ for this scenario to deploy SQL Managed Instance.
- - _`SQLMIHA`_ - Boolean that sets whether to deploy SQL Managed Instance in high availability mode using Business Critical pricing tier. A value of _**false**_ selects General Purpose pricing tier and a value of _**true**_ selects Business Critical pricing tier. Default value is _**false**_.
- - _`enableADAuth`_ - Boolean that sets whether or not to deploy the Active Directory Domain Services (ADDS) VM. Default value is _**false**_. Use value _**true**_ for this scenario to deploy the Domain Controller VM and support AD authentication in SQL Managed Instance.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this scenario. Default value is _**false**_.
- - _`deployBastion`_ - Boolean that sets whether to deploy Azure Bastion to connect to the client VM. Default value is _**false**_.
- - _`bastionHostName`_ - Azure Bastion host name. Default value is _**Arc-Data-Demo-Bastion**_.
-
- > **Note:** In case you decided to deploy SQL Managed Instance in a highly-available fashion, refer to the ["Perform database failover with SQL Managed Instance Availability Groups"](../../../day2/aks/aks_mssql_ha/_index.md) Jumpstart scenario as well as the ["High Availability with Azure Arc-enabled SQL Managed Instance"](https://learn.microsoft.com/azure/azure-arc/data/managed-instance-high-availability) product documentation.
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json path>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~20-25min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **12 various Azure resources** deployed (If you chose to deploy Azure Bastion, you will have **13 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
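-
-- Optionally, instead of browsing the portal, you can list the deployed resources from the command line; a minimal sketch, assuming the example resource group name used above:
-
- ```shell
- # List everything deployed into the scenario resource group
- az resource list --resource-group Arc-Data-Demo --output table
- ```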
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_mi_ad_auth_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_mi_ad_auth_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_mi_ad_auth_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
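-
-If you prefer the Azure CLI over the portal steps below, a rule like the following would open RDP; a minimal sketch, where the resource group name and your public IP address are placeholders:
-
-```shell
-# Allow inbound RDP (3389) from your public IP only
-az network nsg rule create \
-  --resource-group <resource-group-name> \
-  --nsg-name Arc-Data-Client-NSG \
-  --name AllowRDP \
-  --priority 1009 \
-  --direction Inbound \
-  --access Allow \
-  --protocol Tcp \
-  --source-address-prefixes <your-public-ip> \
-  --destination-port-ranges 3389
-```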
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM. Please make sure to use the User Principal Name of the domain user, i.e. **arcdemo@jumpstart.local**, to log in to the Client VM through Bastion. Login will fail if using the **jumpstart\arcdemo** format.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login to the Client VM using Remote Desktop Connection, use the **jumpstart\arcdemo** Active Directory user account. This user account is the domain administrator and has full privileges to set up AD authentication in SQL Managed Instance. As mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-> **NOTE: Using just arcdemo to log in to the Client VM will not start the automation script at first login, as this scenario relies on domain credentials to support AD authentication when connecting to SQL Managed Instance.**
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and SQL Managed Instance will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the post-run desktop](./26.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **15 various Azure resources deployed**). The important ones to notice are:
-
- - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- > **NOTE: An Azure resource will not be created for a SQL Managed Instance with AD authentication that is created using YAML. Azure resources are created only when deploying SQL Managed Instance using an ARM template or the Azure portal**.
-
- ![Screenshot showing additional Azure resources in the resource group](./27.png)
-
-- As part of the automation, SQL Server Management Studio and Azure Data Studio are installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Open Azure Data Studio to connect to SQL Managed Instance using AD authentication.
-
- > **NOTE: As part of the automation, SQL Managed Instance and PostgreSQL database connections are pre-created with SQL endpoints in Azure Data Studio.**
-
- ![Screenshot showing SQL Managed Instance endpoints and database server credentials](./28.png)
-
- ![Screenshot showing SQL Managed Instance Integrate authentication](./29.png)
-
-- As part of the automation, a SQL Managed Instance endpoints desktop shortcut is created to view connection information and log in to the SQL Managed Instance using Windows authentication. Copy the endpoint information to log in to the SQL server.
-
- ![Screenshot showing SQL Managed Instance endpoints and database server credentials](./30.png)
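-
-- The endpoints can also be retrieved from the command line; a minimal sketch, assuming the instance is named _jumpstart-sql_ in the _arc_ namespace (adjust the names to your deployment):
-
- ```shell
- # Show the SQL Managed Instance details, including its endpoints
- az sql mi-arc show -n jumpstart-sql --k8s-namespace arc --use-k8s
-
- # The custom resource also prints the primary endpoint
- kubectl get sqlmi -n arc
- ```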
-
-- Open SQL Server Management Studio to connect to SQL Managed Instance using Windows Authentication.
-
- ![Screenshot showing SQL Server Management Studio desktop shortcut](./31.png)
-
-- Paste the SQL Managed Instance endpoint information copied in the previous step, select Windows Authentication, leave the currently selected user, and click Connect.
-
- ![Screenshot showing SQL MI connection using Windows Authentication](./32.png)
-
-- Notice the server connection information, the restored default database _AdventureWorks2019_, and the Windows user account created in the SQL Managed Instance to support AD authentication.
-
- ![Screenshot showing SQL MI connection using SQL Server Management Studio](./33.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./34.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_migrate/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_migrate/_index.md
deleted file mode 100644
index 024dcd70b8..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_migrate/_index.md
+++ /dev/null
@@ -1,360 +0,0 @@
----
-type: docs
-title: "Migrate to Azure Arc-enabled SQL Managed Instance"
-linkTitle: "Migrate to Azure Arc-enabled SQL Managed Instance"
-weight: 4
-description: >
----
-
-## Migrate to Azure Arc-enabled SQL Managed Instance on AKS using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can [migrate a SQL database](https://docs.microsoft.com/azure/azure-arc/data/migrate-to-managed-instance) using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/dr/intro-kubernetes) cluster using an [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an AKS cluster deployed with an Azure Arc Data Controller, SQL Managed Instance, and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services. The Azure client VM will host a nested VM with SQL Server installed and configured, from which you will migrate the _AdventureWorks_ sample database.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
- To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (one-time edit). These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/VNET.json) - Deploys a Virtual Network with a single subnet to be used by the Client virtual machine.
- - [_aks_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/aks.json) - Deploys the AKS cluster where all the Azure Arc data services will be deployed.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the AKS cluster, including the data controller and SQL Managed Instance, and a SQL Server instance in a nested VM that will act as the source SQL instance to migrate from.
-
-- In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well on the source SQL instance on the nested VM.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/azuredeploy.parameters.example.json). Alternatively, parameter values can be passed inline at deployment time, as shown in the sketch after this list.
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this Azure Arc-enabled SQL Managed Instance scenario we will set it to _**true**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, set this to either _**true**_ or _**false**_.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
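-
-- As an alternative to editing the parameters file, the same values can be passed inline with additional _`--parameters`_ arguments; a minimal sketch, reusing the example names from this guide, where inline key=value pairs override the values in the file:
-
- ```shell
- az deployment group create \
-   --resource-group Arc-Data-Demo \
-   --name arcdata \
-   --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/Migration/ARM/azuredeploy.json \
-   --parameters azuredeploy.parameters.json \
-   --parameters deployBastion=true
- ```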
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM) and run the below command:
-
- ```shell
- az group create --name <resource-group-name> --location <azure-region>
- az deployment group create \
- --resource-group <resource-group-name> \
- --name <deployment-name> \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/Migration/ARM/azuredeploy.json \
- --parameters <azuredeploy.parameters.json parameters file location>
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdata \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/Migration/ARM/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **8 various Azure resources** deployed (If you chose to deploy Azure Bastion, you will have **9 Azure resources**).
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_migrate/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_migrate/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_migrate/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM. Please make sure to use the User Principal Name of the domain user, i.e. **arcdemo@jumpstart.local**, to log in to the Client VM through Bastion. Login will fail if using the **jumpstart\arcdemo** format.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/aks/Migration/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and SQL Managed Instance will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the post-run desktop](./28.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **12 various Azure resources deployed**). The important ones to notice are:
-
- - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controllers that are now deployed on the Kubernetes clusters.
-
- - _Azure Arc-enabled SQL Managed Instance_ - The SQL Managed Instances that are now deployed on the Kubernetes clusters.
-
- ![Screenshot showing additional Azure resources in the resource group](./29.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./30.png)
-
- ![Screenshot showing Azure Data Studio extensions](./31.png)
-
-- Additionally, the SQL Managed Instance connection and the SQL instance on the nested VM will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation on the source SQL instance on the nested VM.
-
- ![Screenshot showing Azure Data Studio SQL MI and nested SQL Server connection](./32.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with AKS as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/aks/aks_monitor/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
-![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./33.png)
-
-![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./34.png)
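-
-The same extensions can also be listed with the Azure CLI; a minimal sketch, where the resource group and cluster names are placeholders for your deployment:
-
-```shell
-# List the extensions installed on the Azure Arc-enabled Kubernetes cluster
-az k8s-extension list \
-  --resource-group <resource-group-name> \
-  --cluster-name <cluster-name> \
-  --cluster-type connectedClusters \
-  --output table
-```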
-
-## Operations
-
-### Creating a backup of the _AdventureWorks_ database from the source SQL instance to prepare for migration
-
-- As part of the automation, the script will also create a new text file and a desktop shortcut named "SQLMI Endpoints" that includes the SQL endpoint for the Azure Arc-enabled SQL Managed Instance.
-
- ![Screenshot showing the Azure Arc-enabled SQL Managed Instance endpoint URLs text file](./35.png)
-
-- To connect to the source SQL instance on the nested VM, you can find the connection details in Azure Data Studio.
-
- ![Screenshot showing the source SQL instance connection details](./36.png)
-
- ![Screenshot showing the source SQL instance connection details](./37.png)
-
-- Open Microsoft SQL Server Management Studio (SSMS), which is installed automatically for you as part of the bootstrap Jumpstart scenario, use the primary endpoint IP address for the Azure Arc-enabled SQL Managed Instance, and log in to the primary DB instance using the username and password provided in the text file mentioned above.
-
- ![Screenshot showing opening SQL Server Management Studio from the start menu](./38.png)
-
-- Use the username and password you entered when you provisioned the environment and select “SQL Server Authentication”. Alternatively, you can retrieve the username and password from the _`$env:AZDATA_USERNAME`_ and _`$env:AZDATA_PASSWORD`_ environment variables.
-
- ![Screenshot showing logging into the SQL Server Management Studio](./39.png)
-
-- Connect to the source SQL instance as well using the connection details you got from the Azure Data Studio.
-
- ![Screenshot showing the SQL Server Management Studio after login](./40.png)
-
- ![Screenshot showing logging into the SQL Server Management Studio](./41.png)
-
- ![Screenshot showing the two SQL instances connected](./42.png)
-
-- Expand the source SQL instance, navigate to the _AdventureWorks_ database, and execute the following query, using the same username and password as in the previous step.
-
- ```sql
- BACKUP DATABASE AdventureWorksLT2019
- TO DISK = 'C:\temp\AdventureWorksLT2019.bak'
- WITH FORMAT, MEDIANAME = 'AdventureWorksLT2019' ;
- GO
- ```
-
- ![Screenshot showing starting a new query](./43.png)
-
- ![Screenshot showing the success message after backing up the AdventureWorks database](./44.png)
-
-- You can perform the same steps to backup the _AdventureWorks_ database from Azure Data Studio.
-
- ![Screenshot showing starting a new query in Azure Data Studio](./45.png)
-
- ![Screenshot showing the success message after backing up the AdventureWorks database in Azure Data Studio](./46.png)
-
-### Migrate the _AdventureWorks_ database from the source SQL instance to the Azure Arc-enabled SQL Managed Instance
-
-- To migrate the backup we created, open a new PowerShell ISE session and use the following PowerShell snippet to:
- - Copy the created backup to the client VM from the nested SQL VM
- - Copy the backup to the Azure Arc-enabled SQL Managed Instance pod
- - Initiate the backup restore process
-
- ```PowerShell
- Set-Location -Path c:\temp
- $nestedWindowsUsername = "Administrator"
- $nestedWindowsPassword = "ArcDemo123!!"
- $secWindowsPassword = ConvertTo-SecureString $nestedWindowsPassword -AsPlainText -Force
- $winCreds = New-Object System.Management.Automation.PSCredential ($nestedWindowsUsername, $secWindowsPassword)
- # Copy the backup from the nested SQL VM to the client VM over PowerShell Direct
- $session = New-PSSession -VMName Arcbox-SQL -Credential $winCreds
- Copy-Item -FromSession $session -Path C:\temp\AdventureWorksLT2019.bak -Destination C:\Temp\AdventureWorksLT2019.bak
- # Copy the backup into the SQL Managed Instance pod and initiate the restore
- kubectl cp ./AdventureWorksLT2019.bak jumpstart-sql-0:var/opt/mssql/data/AdventureWorksLT2019.bak -n arc -c arc-sqlmi
- kubectl exec jumpstart-sql-0 -n arc -c arc-sqlmi -- /opt/mssql-tools/bin/sqlcmd -S localhost -U $Env:AZDATA_USERNAME -P $Env:AZDATA_PASSWORD -Q "RESTORE DATABASE AdventureWorksLT2019 FROM DISK = N'/var/opt/mssql/data/AdventureWorksLT2019.bak' WITH MOVE 'AdventureWorksLT2012_Data' TO '/var/opt/mssql/data/AdventureWorksLT2012.mdf', MOVE 'AdventureWorksLT2012_Log' TO '/var/opt/mssql/data/AdventureWorksLT2012_log.ldf'"
- ```
-
- ![Screenshot showing PowerShell script to restore the backup](./47.png)
-
-- Navigate to the Azure Arc-enabled SQL Managed Instance in the Microsoft SQL Server Management Studio (SSMS) and you can see that the _AdventureWorks_ database has been restored successfully.
-
- ![Screenshot showing the restored database](./48.png)
-
-- You can also see the migrated database on Azure Data Studio.
-
- ![Screenshot showing the restored database in Azure Data Studio](./49.png)
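-
-- You can also verify the restore from the command line by querying the instance directly in the pod, mirroring the restore step above; a minimal sketch (the username and password placeholders come from your deployment):
-
- ```shell
- # List the databases on the Azure Arc-enabled SQL Managed Instance
- kubectl exec jumpstart-sql-0 -n arc -c arc-sqlmi -- /opt/mssql-tools/bin/sqlcmd \
-   -S localhost -U <AZDATA_USERNAME> -P <AZDATA_PASSWORD> \
-   -Q "SELECT name FROM sys.databases"
- ```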
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./50.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_postgresql_arm_template_ado/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_postgresql_arm_template_ado/_index.md
deleted file mode 100644
index d1a6ee21ad..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_postgresql_arm_template_ado/_index.md
+++ /dev/null
@@ -1,196 +0,0 @@
----
-type: docs
-title: "PostgreSQL Azure DevOps Release"
-linkTitle: "PostgreSQL Azure DevOps Release"
-weight: 6
-description: >
----
-
-## Deploy Azure PostgreSQL on AKS using Azure DevOps Release Pipeline
-
-The following Jumpstart scenario will guide you on how to use [Azure DevOps (ADO) Release pipelines](https://docs.microsoft.com/azure/devops/pipelines/release/?view=azure-devops) to deploy a "Ready to Go" environment so you can start using Azure Arc-enabled data services with Azure PostgreSQL on [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster using [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview).
-
-By the end of this scenario, you will have an Azure DevOps Release pipeline to deploy an AKS cluster with an Azure Arc Data Controller ([in "Directly Connected" mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity)), Azure PostgreSQL with a sample database, and a Microsoft Windows Server 2022 (Datacenter) Azure VM, installed & pre-configured with all the required tools needed to work with Azure Arc Data Services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-> **NOTE: The following scenario focuses on the Azure DevOps Release pipeline creation. Once the pipeline has been created and the environment deployment has finished, the automation flow and next steps are as [described in the main bootstrap scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/)**
-
-## Prerequisites
-
-- [Azure DevOps account](https://azure.microsoft.com/services/devops/) set up with your organization and ready for project creation.
- - (Optional) [Create new Azure DevOps organization](https://docs.microsoft.com/azure/devops/organizations/accounts/create-organization?view=azure-devops).
- - (Optional) [Create new Azure DevOps project](https://docs.microsoft.com/azure/devops/organizations/projects/create-project?view=azure-devops&tabs=preview-page).
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP)
-
- To be able to complete the scenario and its related automation, an Azure service principal assigned with the “Contributor” role is required. To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "<Unique SP Name>" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
- > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Deployment
-
-In this scenario, you will create a new Release pipeline to deploy the environment ARM template for this Jumpstart scenario.
-
-- In a new or an existing ADO project, start the process of creating a new release pipeline.
-
- ![Screenshot of creating new ADO pipeline](./01.jpg)
-
- ![Screenshot of creating new ADO pipeline](./02.jpg)
-
-- To create the pipeline, we will use an empty job template and give it a name (once done, click the X button).
-
- ![Screenshot of creating new empty job template](./03.jpg)
-
- ![Screenshot of creating new empty job template](./04.jpg)
-
-- Create a new task for the stage you have just created. This task will be the one for deploying the ARM template.
-
- ![Screenshot of creating new ARM template deployment task](./05.jpg)
-
- ![Screenshot of creating new ARM template deployment task](./06.jpg)
-
-- Click on the new task to start its configuration.
-
- ![Screenshot of deployment task config](./07.jpg)
-
-- When deploying an ARM template, the Azure Resource Manager connection and subscription must be provided.
-
- ![Screenshot of Azure Resource Manager connection config](./08.jpg)
-
- > **NOTE: For a new ADO project, you will be asked to click the authorization button**
-
- ![Screenshot of Azure subscription config](./09.jpg)
-
-- Provide the Azure resource group and location where all the resources will be deployed. Make sure to validate if the service is [currently available in your Azure region](https://azure.microsoft.com/global-infrastructure/services/?products=azure-arc).
-
- ![Screenshot of resource group and location config](./10.jpg)
-
-- As mentioned, the task will deploy the existing ARM template for deploying Azure Arc-enabled data services with PostgreSQL that is in the Azure Arc Jumpstart GitHub repository.
-
- - Change the Template location to "URL of the file"
-
- - Copy the raw URLs for both the [template](https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/arm_template/postgres_hs/azuredeploy.json) and the [parameters](https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/aks/arm_template/postgres_hs/azuredeploy.parameters.json) json files and paste each into its proper field.
-
- - The deployment ARM template requires you to provide parameter values. Click the edit button next to _Override template parameters_ to add your parameter values (a format sketch follows the screenshots below).
-
- ![Screenshot of ARM template config](./11.jpg)
-
- - _`sshRSAPublicKey`_ - Your ssh public key
- - _`spnClientId`_ - Your Azure service principal name
- - _`spnClientSecret`_ - Your Azure service principal password
- - _`spnTenantId`_ - Your Azure tenant ID
- - _`windowsAdminUsername`_ - Client Windows VM admin username
- - _`windowsAdminPassword`_ - Client Windows VM admin password
- - _`myIpAddress`_ - Public IP address of your network
- - _`logAnalyticsWorkspaceName`_ - Unique Log Analytics workspace name
- - _`deploySQLMI`_ - SQL Managed Instance deployment (true/false)
- - _`SQLMIHA`_ - SQL Managed Instance high-availability deployment (true/false)
- - _`deployPostgreSQL`_ - PostgreSQL deployment (true/false)
- - _`clusterName`_ - AKS cluster name
- - _`bastionHostName`_ - Azure Bastion host name
- - _`dnsPrefix`_ - AKS unique DNS prefix
- - _`kubernetesVersion`_ - AKS Kubernetes Version (See previous prerequisite)
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _`azuredeploy.parameters.json`_ file**
-
- ![Screenshot of ARM template parameters config](./12.jpg)
-
- ![Screenshot of ARM template parameters config](./13.jpg)
-
- ![Screenshot of ARM template parameters config](./14.jpg)
-
- ![Screenshot of ARM template parameters config](./15.jpg)
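-
- - For reference, the override string in that field follows a _`-parameterName value`_ pattern; a hedged sketch with placeholder values (yours will differ):
-
- ```
- -sshRSAPublicKey "ssh-rsa AAAAB3..." -spnClientId "<appId>" -spnClientSecret "<password>" -spnTenantId "<tenant>" -windowsAdminUsername "arcdemo" -windowsAdminPassword "<password>" -deploySQLMI false -SQLMIHA false -deployPostgreSQL true
- ```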
-
-- Provide a deployment name.
-
- ![Screenshot of ARM template parameters config](./16.jpg)
-
-- Click the save button.
-
- ![Screenshot of config save](./17.jpg)
-
-- After saving the task configuration, continue to create the release pipeline.
-
- ![Screenshot of pipeline creation](./18.jpg)
-
- ![Screenshot of pipeline creation](./19.jpg)
-
- ![Screenshot of pipeline creation](./20.jpg)
-
- ![Screenshot of pipeline creation](./21.jpg)
-
-- Once done, click on the new release link. In this scenario, you will manually trigger the deployment. Once you do, click on the Logs button to see the progress.
-
- ![Screenshot of pipeline deployment](./22.jpg)
-
- ![Screenshot of pipeline deployment](./23.jpg)
-
- ![Screenshot of deployment progress logs](./24.jpg)
-
- ![Screenshot of deployment progress logs](./25.jpg)
-
-- Once completed, all the deployment resources will be available in the Azure portal.
-
- > **NOTE: Deployment time of the Azure resources (AKS + Windows VM) can take ~25-30 minutes.**
-
- ![Screenshot of deployment completed](./26.jpg)
-
- ![Screenshot of Azure resources](./27.jpg)
-
-- As mentioned, this scenario focuses on the Azure DevOps Release pipeline creation. At this point, now that you have the Azure resources created, continue to the next steps as [described in the main bootstrap scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/aks/aks_postgresql_arm_template/#windows-login--post-deployment).
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/_index.md
deleted file mode 100644
index b8a7253223..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/_index.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-type: docs
-title: "Kubernetes Cluster API"
-linkTitle: "Kubernetes Cluster API"
-weight: 2
-description: >-
----
\ No newline at end of file
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/_index.md
deleted file mode 100644
index 4de120b282..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/_index.md
+++ /dev/null
@@ -1,125 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance Availability Groups Failover"
-linkTitle: "SQL Managed Instance Availability Groups Failover"
-weight: 1
-description: >
----
-
-## Perform database failover with SQL Managed Instance Availability Groups
-
-The following Jumpstart scenario will guide you on how to explore and test Azure Arc-enabled SQL Managed Instance Availability Groups, simulate failures, and test DB replication. In this scenario, you will restore a sample database, initiate a failover to force an HA event, and validate database replication across multiple SQL nodes in an availability group.
-
-> **NOTE: This guide assumes you already deployed an Azure Arc-enabled SQL Managed Instance on Cluster API (CAPI) cluster using the Cluster API Azure Provider (CAPZ). If you haven't, the [following bootstrap Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/cluster_api/capi_azure/arm_template/mssql_mi/) offers you a way to do so in an automated fashion. All the steps and operations described in this scenario assume you used the mentioned bootstrap Jumpstart scenario and have the Client VM deployed as part of it.**
-
-> **NOTE: Azure Arc-enabled SQL Managed Instance with Availability Groups is currently in [preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Deployed Kubernetes Resources
-
-When deploying Azure Arc-enabled SQL Managed Instance in an availability group, multiple Kubernetes resources are created to support it. The below section describes the main ones that are important to understand for this scenario.
-
-### SQL MI Pods Replicas
-
-Three SQL pod replicas will be deployed to assemble the availability group. These can be seen using the _`kubectl get pods -n <namespace> -o wide`_ command, for example, _`kubectl get pods -n arc -o wide`_. It is also important to highlight that Kubernetes will spread the pods across the various nodes in the cluster.
-
-![SQL pods](./01.png)
-
-### Services & Endpoints
-
-An external endpoint is automatically provisioned for connecting to databases within the availability group. This endpoint plays the role of the availability group listener.
-
-In an availability group deployment, two endpoints, primary and secondary, get created, both backed by a Kubernetes Service resource of type _LoadBalancer_.
-
-- Using the _`az sql mi-arc show -n jumpstart-sql --k8s-namespace arc --use-k8s`_ command, validate the deployment endpoints details and the Availability Group health status.
-
- ![az sql Azure CLI extension](./02.png)
-
- ![az sql mi-arc show command](./03.png)
-
- > **NOTE: Initiating the command will also deploy _az sql_ Azure CLI extension automatically.**
-
-- Using the _`kubectl get svc -n arc`_ command, you will be able to see the _LoadBalancer_ services used by the endpoints.
-
- ![Kubernetes services](./04.png)
-
-## Database Restore
-
-In order for you to test the HA functionality, and as part of the bootstrap Jumpstart scenario, the [_AdventureWorks2019_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) sample database was directly restored onto the primary SQL node pod container.
-
-## Database Replication
-
-All databases are automatically added to the availability group, including all user databases (such as the _AdventureWorks2019_ database you just restored) and system databases like _master_ and _msdb_. This capability provides a single-system view across the availability group replicas.
-
-To retrieve the SQL Managed Instance endpoints, a desktop shortcut for a _SQLMI Endpoints_ text file is automatically created for you that includes both the primary and the secondary SQL endpoints.
-
-![Endpoints script](./05.png)
-
-![Endpoints text file](./06.png)
-
-- Open Microsoft SQL Server Management Studio (SSMS), which is installed automatically for you as part of the [bootstrap Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/), use the primary endpoint IP address, and log in to the primary DB instance using the username and password provided in the text file mentioned above.
-
- ![Microsoft SQL Server Management Studio](./07.png)
-
- ![SSMS login](./08.png)
-
- ![Primary endpoint connected](./09.png)
-
-- Follow the same process and connect to the secondary endpoint.
-
- ![Connect button](./10.png)
-
- ![Secondary endpoint connected](./11.png)
-
-- On both endpoints, expand the "Databases" and the "Always On High Availability" sections and see how the _AdventureWorks2019_ database is already automatically replicated and is part of the availability group.
-
- ![Databases replication](./12.png)
-
-- To test that the DB replication is working, a simple table modification is needed. For this example, on the primary replica, expand the "Tables" section for the database, select the _"HumanResources.Employee"_ table, click on "Edit Top 200 Rows", modify one or more records and commit the changes by saving (_`Ctrl+S`_). As you can see, in this example a change was made to _"ken0"_ title and the number of vacation hours for _"rob0"_.
-
- ![Expanding database for primary](./13.png)
-
- ![Edit Top 200 Rows](./14.png)
-
- ![Modifying a table](./15.png)
-
-- On the secondary replica, expand the "Tables" section for the database, click on "Select Top 1000 Rows", and in the Results pane see how the table change is now replicated, showing the synchronization of the SQL instances in the availability group works as expected.
-
- ![Expanding database for secondary](./16.png)
-
- ![Select Top 1000 Rows](./17.png)
-
- ![Replication works](./18.png)
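-
-- The same check can be made without the SSMS UI; a minimal sketch using _sqlcmd_ from the Client VM against the secondary endpoint (the endpoint, port, and credentials are placeholders from the _SQLMI Endpoints_ file):
-
- ```shell
- # Read the replicated rows from the secondary replica
- sqlcmd -S <secondary-endpoint-ip>,<port> -U <username> -P <password> -d AdventureWorks2019 \
-   -Q "SELECT TOP 5 LoginID, VacationHours FROM HumanResources.Employee"
- ```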
-
-## Database Failover
-
-As you already know, the availability group includes three Kubernetes replicas, with a primary and two secondaries. All CRUD operations for the availability group are managed internally, including creating the availability group and joining replicas to it.
-
-- To test failover between the replicas, we will simulate a "crash" that will trigger an HA event and force one of the secondary replicas to get promoted to a primary replica. Open two side-by-side PowerShell sessions. On the left side session, use the _`kubectl get pods -n arc`_ command to review the deployed pods. The right-side session will be used to monitor the pods on the cluster using the _`kubectl get pods -n arc -w`_ command. As you can see, three SQL replicas with four containers each are running.
-
- ![side-by-side PowerShell sessions](./19.png)
-
-- In SSMS, you can also see that _jumpstart-sql-0_ is acting as the primary replica and _jumpstart-sql-1_ and _jumpstart-sql-2_ are the secondary. At this point, close SSMS.
-
- ![Primary and secondary replicas](./20.png)
-
-- To trigger the HA event, delete the primary replica _jumpstart-sql-0_ using the _`kubectl delete pod jumpstart-sql-0 -n arc`_ command and watch how the pod gets deleted and then deployed again as part of the stateful set backing the instance. Wait for the _jumpstart-sql-0_ pod to become ready again (and an additional few minutes to let the availability group recover).
-
- ![Pod deletion](./21.png)
-
-- Re-open SSMS and connect back to the previous _secondary_ endpoint. You can now see that _jumpstart-sql-0_ and _jumpstart-sql-2_ are acting as the secondary replicas and _jumpstart-sql-1_ was promoted to primary. In addition, run the _`az sql mi-arc show -n jumpstart-sql --k8s-namespace arc --use-k8s`_ command again and check the health status of the availability group.
-
- > **NOTE: It might take a few minutes for the availability group to return to a healthy state.**
-
- ![Successful failover](./22.png)
-
- ![Availability group health](./23.png)
-
-## Re-Validating Database Replication
-
-- Now that we performed a successful failover, we can re-validate and make sure replication still works as expected. In SSMS, re-add the second instance.
-
- ![Re-adding instance](./24.png)
-
-- In the primary endpoint connection, repeat the process of performing a change on the _AdventureWorks2019_ database _"HumanResources.Employee"_ table and check that replication is working. In the example below, you can see how new values in new rows are now replicated.
-
- ![Successful replication](./25.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/eks/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/eks/_index.md
deleted file mode 100644
index c03032be58..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/eks/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Amazon Elastic Kubernetes Service"
-linkTitle: "Amazon Elastic Kubernetes Service"
-weight: 4
-description: >-
- If you are working in a multi-cloud environment, the scenario in this section will guide you on creating an Amazon Elastic Kubernetes Service (EKS) cluster with Azure Arc-enabled data services integration in an automated fashion using Terraform.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_dc_vanilla_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_dc_vanilla_terraform/_index.md
deleted file mode 100644
index 878478de33..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_dc_vanilla_terraform/_index.md
+++ /dev/null
@@ -1,311 +0,0 @@
----
-type: docs
-title: "Data Controller Terraform plan"
-linkTitle: "Data Controller Terraform plan"
-weight: 1
-description: >
----
-
-## Deploy a vanilla Azure Arc Data Controller in a directly connected mode on EKS using Terraform
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using Azure Arc Data Services and deploy Azure data services on [Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/) cluster, using [Terraform](https://www.terraform.io/).
-
-By the end of this scenario, you will have an EKS cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) AWS EC2 instance VM, installed & pre-configured with all the required tools needed to work with Azure Arc Data Services.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Install](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) and [Configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) AWS CLI.
-
-- [Create a free Amazon Web Services account](https://aws.amazon.com/free/) if you don't already have one.
-
-- [Install Terraform >=1.0](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with multiple Role-based access control (RBAC) roles is required:
-
- - "Contributor" - Required for provisioning Azure resources
- - "Security admin" - Required for installing Cloud Defender Azure-Arc enabled Kubernetes extension and dismiss alerts
- - "Security reader" - Required for being able to view Azure-Arc enabled Kubernetes Cloud Defender extension findings
- - "Monitoring Metrics Publisher" - Required for being Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
- To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
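-
-  For example, a sketch of a more narrowly scoped role assignment, assuming the _Arc-Data-Demo_ resource group created later in these prerequisites:
-
-  ```shell
-  az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" \
-    --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-Data-Demo
-  ```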
-
-- Follow the steps [here](https://docs.microsoft.com/azure/azure-arc/kubernetes/custom-locations#enable-custom-locations-on-cluster) or run the command below to retrieve your AAD tenant-specific ObjectId for the "Custom Locations RP" Enterprise Application, needed to onboard Custom Locations on EKS:
-
- ```shell
- # Note that the APPLICATION ID: bc313c14-388c-4e7d-a58e-70017303ee3b is constant across all tenants
- az ad sp show --id 'bc313c14-388c-4e7d-a58e-70017303ee3b' --query id -o tsv
- ```
-
-- Create a resource group
-
- ```shell
- az group create --name "Arc-Data-Demo" --location "eastus"
- ```
-
-### Create a new AWS IAM Role & Key
-
-Create an AWS user IAM key. An access key grants programmatic access to your resources, which we will use later in this scenario.
-
-- Navigate to the [IAM Access page](https://console.aws.amazon.com/iam/home#/home).
-
- ![Screenshot showing creating an AWS IAM Role & Key](./01.png)
-
-- Select **Users** from the side menu.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./02.png)
-
-- Select the **User** you want to create the access key for.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./03.png)
-
-- Select the **Security credentials** tab for the selected **User**.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./04.png)
-
-- Under **Access Keys** select **Create Access Keys**.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./05.png)
-
-- The popup window will show you the ***Access key ID*** and ***Secret access key***. Save both of these values to configure the **Terraform plan** variables later.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./06.png)
-
-- In order to open an RDP session to the Windows Client EC2 instance, an EC2 Key Pair is required. From the *Services* menu, click on *"EC2"*, enter the *Key Pairs* settings from the left sidebar (under the *Network & Security* section) and click on *"Create key pair"* (top-right corner) to create a new key pair.
-
- ![Screenshot showing creating an EC2 Key Pair](./07.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./08.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./09.png)
-
-- Provide a meaningful name, for example *terraform*, and click on *"Create key pair"*, which will then automatically download the created *pem* file.
-
- ![Screenshot showing creating an EC2 Key Pair](./10.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./11.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./12.png)
-
-- Copy the downloaded *pem* file to where the terraform binaries are located (in your cloned repository directory).
-
- ![Screenshot showing creating an EC2 Key Pair](./13.png)
-
- > **NOTE: EC2 Key Pairs are regional.**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the Terraform runtime environment variables in the _terraform.tfvars_ file (one-time edit). These variables are used throughout the deployment.
-
-- The [main Terraform plan](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/main.tf) will initiate the deployment of the other modules:
-
- - [_clientVM_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/clientVM/main.tf) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_cluster_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/cluster/main.tf) - Deploys the EKS cluster where all the Azure Arc data services will be deployed.
-  - [_workers_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/workers/main.tf) - Deploys the EKS cluster's worker nodes.
-
-- User remotes into the client Windows VM, which automatically kicks off the DataServicesLogonScript PowerShell script that deploys and configures Azure Arc-enabled data services on the EKS cluster, including the data controller.
-
-## Deployment
-
-As mentioned, the Terraform plan will deploy an EKS cluster, the Azure Arc Data Controller on that cluster and an EC2 Windows Server 2022 Client instance.
-
-- Before running the Terraform plan, create a terraform.tfvars file in the root of the terraform folder and supply some values for your environment.
-
- ```HCL
- AWS_ACCESS_KEY_ID = "ZFTIFC443FTFDEZ5TKNR"
- AWS_SECRET_ACCESS_KEY = "fakeSecretValue1dfd343sd5712adfddjh"
- AWS_DEFAULT_REGION = "us-west-1"
- azureLocation = "eastus"
- spnClientId = "1414133c-9786-53a4-b231-f87c143ebdb1"
- spnClientSecret = "fakeSecretValue123458125712ahjeacjh"
- spnTenantId = "33572583-d294-5b56-c4e6-dcf9a297ec17"
- subscriptionId = "33987583-A984-5C87-T4e3-POf7a397ec17"
- resourceGroup = "Arc-Data-Demo"
- workspaceName = "la-arc-001"
- deploySQLMI = false
- SQLMIHA = false
- deployPostgreSQL = false
- customLocationObjectId = "649cb28f-bc13-492a-9470-c8bf01fa8eeb"
- ```
-
-- Variable reference:
-
- - **_`AWS_ACCESS_KEY_ID`_** - Your AWS access key.
- - **_`AWS_SECRET_ACCESS_KEY`_** - Your AWS secret access key.
- - **_`AWS_DEFAULT_REGION`_** - AWS location code (e.g. 'us-west-1', 'us-east-2', etc.).
- - **_`azureLocation`_** - Azure location code (e.g. 'eastus', 'westus2', etc.).
- - **_`spnClientId`_** - Your Azure service principal id.
- - **_`spnClientSecret`_** - Your Azure service principal secret.
- - **_`spnTenantId`_** - Your Azure tenant id.
- - **_`subscriptionId`_** - Your Azure subscription Id.
- - **_`resourceGroup`_** - Resource group which will contain all of the Azure Arc data services resources.
-  - **_`workspaceName`_** - Unique name for the Log Analytics workspace.
-  - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this data controller vanilla scenario, we leave it set to _**false**_.
-  - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. For this data controller vanilla scenario, we leave it set to _**false**_.
-  - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this data controller vanilla scenario, we leave it set to _**false**_.
-  - **_`customLocationObjectId`_** - The object ID of the Azure AD application used by the Azure Arc service, retrieved in the prerequisites section.
-
-> **NOTE: Any variables in bold are required. If any optional parameters are not provided, defaults will be used.**
-
-- Now you will deploy the Terraform file. Navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform) and run the commands below:
-
- ```shell
- terraform init
- terraform plan -out=infra.out
- terraform apply "infra.out"
- ```
-
-> **NOTE: The deployment time for this scenario can take ~20-35min**
-
-- Example output from `terraform init`:
-
- ![Screenshot showing creating the terraform init command output](./14.png)
-
-- Example output from `terraform plan -out=infra.out`:
-
- ![Screenshot showing creating the terraform plan command output](./15.png)
-
-- Once completed, the plan will output a decrypted password for your Windows Client instance that you will use to RDP into it. Before connecting to the Client instance, you can review the EKS cluster and the EC2 instances created. Notice how four instances were created: three EKS nodes and the Client instance.
-
- ![Screenshot showing creating the terraform apply command output](./16.png)
-
- ![Screenshot showing creating the new EKS cluster](./17.png)
-
- ![Screenshot showing creating the new EKS cluster](./18.png)
-
- ![Screenshot showing creating the new EC2 instances](./19.png)
-
- ![Screenshot showing creating the new EC2 instances](./20.png)
-
- ![Screenshot showing creating the new EC2 instances](./21.png)
-
-## Windows Login & Post Deployment
-
-- Now that the first phase of the automation is completed, it is time to RDP to the client VM. Select the Windows instance, click *"Connect"* and download the Remote Desktop file.
-
- ![Screenshot showing starting an RDP session to the Client instance](./22.png)
-
- ![Screenshot showing starting an RDP session to the Client instance](./23.png)
-
-- Using the decrypted password generated from the plan output, RDP to the Windows instance. In case you need to get the password later, use the ```terraform output``` command to display the plan output again.
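-
-  For example:
-
-  ```shell
-  # Re-print all plan outputs, including the decrypted Windows administrator password
-  terraform output
-  ```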
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster and ready to use.
-
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the post-run desktop](./32.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **4 Azure resources deployed**).
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./33.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./34.png)
-
- ![Screenshot showing Azure Data Studio extensions](./35.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./36.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./37.png)
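-
-Alternatively, you can list the installed extensions with the Azure CLI. A sketch, assuming the cluster name placeholder is replaced with your Azure Arc-enabled Kubernetes resource name:
-
-```shell
-az k8s-extension list --cluster-name <cluster-name> \
-  --resource-group Arc-Data-Demo \
-  --cluster-type connectedClusters \
-  --output table
-```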
-
-## Delete the deployment
-
-- If you want to delete the entire Azure environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./38.png)
-
-- If you want to delete the entire environment, use the _`terraform destroy`_ command to delete all of the AWS resources.
-
- ```shell
- terraform destroy --auto-approve
- ```
-
- ![Screenshot showing the deletion of all AWS resources](./39.png)
-
-  > **NOTE: Because EKS creates internal AWS dependencies that Terraform has no knowledge of from our plan, we need to delete those resources from the AWS console while `terraform destroy` is cleaning up. This avoids dependency conflicts and ongoing billing from orphaned resources such as EKS volumes.**
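-
-If you prefer the AWS CLI for spotting these leftovers, below is a sketch; it lists the classic load balancers created for EKS Services and any EBS volumes left in the `available` (detached) state:
-
-```shell
-# List classic load balancer names (EKS Services of type LoadBalancer create these)
-aws elb describe-load-balancers --query 'LoadBalancerDescriptions[].LoadBalancerName'
-
-# List detached EBS volumes that may be orphaned EKS Persistent Volumes
-aws ec2 describe-volumes --filters Name=status,Values=available --query 'Volumes[].VolumeId'
-```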
-
-- While the `destroy` command is running, delete any new Load Balancers created as EKS Services (`EC2 > Load Balancing > Load Balancers`) that are deployed in AWS from the Console:
-
- ![Screenshot showing the Deletion of Load Balancers](./40.png)
-
-- While the `destroy` command is running, delete any new Elastic Block Stores, created as EKS Persistent Volumes (`EC2 > Elastic Block Store > Volumes`) that are deployed in AWS from the Console:
-
- ![Screenshot showing the Deletion of Elastic Block Stores](./41.png)
\ No newline at end of file
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_mssql_mi_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_mssql_mi_terraform/_index.md
deleted file mode 100644
index 30ada43c9f..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_mssql_mi_terraform/_index.md
+++ /dev/null
@@ -1,399 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance Terraform plan"
-linkTitle: "SQL Managed Instance Terraform plan"
-weight: 2
-description: >
----
-
-## Deploy Azure SQL Managed Instance in directly connected mode on EKS using Terraform
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on an [Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/) cluster using [Terraform](https://www.terraform.io/).
-
-By the end of this scenario, you will have an EKS cluster deployed with an Azure Arc Data Controller, SQL Managed Instance, and a Microsoft Windows Server 2022 (Datacenter) AWS EC2 client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Install](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) and [Configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) AWS CLI.
-
-- [Create a free Amazon Web Services account](https://aws.amazon.com/free/) if you don't already have one.
-
-- [Install Terraform >=1.0](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned multiple Role-based access control (RBAC) roles is required:
-
-  - "Contributor" - Required for provisioning Azure resources
-  - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
-  - "Security reader" - Required for viewing Azure Arc-enabled Kubernetes Cloud Defender extension findings
-  - "Monitoring Metrics Publisher" - Required for Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, log in to your Azure account and run the command below (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Follow the steps [here](https://docs.microsoft.com/azure/azure-arc/kubernetes/custom-locations#enable-custom-locations-on-cluster) or run the command below to retrieve your AAD tenant-specific ObjectId for the "Custom Locations RP" Enterprise Application, needed to onboard Custom Locations on EKS:
-
- ```shell
- # Note that the APPLICATION ID: bc313c14-388c-4e7d-a58e-70017303ee3b is constant across all tenants
- az ad sp show --id 'bc313c14-388c-4e7d-a58e-70017303ee3b' --query id -o tsv
- ```
-
-- Create a resource group
-
- ```shell
- az group create --name "Arc-Data-Demo" --location "eastus"
- ```
-
-### Create a new AWS IAM Role & Key
-
-Create an AWS user IAM key. An access key grants programmatic access to your resources, which we will use later in this scenario.
-
-- Navigate to the [IAM Access page](https://console.aws.amazon.com/iam/home#/home).
-
- ![Screenshot showing creating an AWS IAM Role & Key](./01.png)
-
-- Select **Users** from the side menu.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./02.png)
-
-- Select the **User** you want to create the access key for.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./03.png)
-
-- Select the **Security credentials** tab for the selected **User**.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./04.png)
-
-- Under **Access Keys** select **Create Access Keys**.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./05.png)
-
-- The popup window will show you the ***Access key ID*** and ***Secret access key***. Save both of these values to configure the **Terraform plan** variables later.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./06.png)
-
-- In order to open an RDP session to the Windows Client EC2 instance, an EC2 Key Pair is required. From the *Services* menu, click on *"EC2"*, enter the *Key Pairs* settings from the left sidebar (under the *Network & Security* section) and click on *"Create key pair"* (top-right corner) to create a new key pair.
-
- ![Screenshot showing creating an EC2 Key Pair](./07.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./08.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./09.png)
-
-- Provide a meaningful name, for example *terraform*, and click on *"Create key pair"*, which will then automatically download the created *pem* file.
-
- ![Screenshot showing creating an EC2 Key Pair](./10.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./11.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./12.png)
-
-- Copy the downloaded *pem* file to where the terraform binaries are located (in your cloned repository directory).
-
- ![Screenshot showing creating an EC2 Key Pair](./13.png)
-
- > **NOTE: EC2 Key Pairs are regional.**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the Terraform runtime environment variables in the _terraform.tfvars_ file (one-time edit). These variables are used throughout the deployment.
-
-- The [main Terraform plan](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/main.tf) will initiate the deployment of the other modules:
-
- - [_clientVM_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/clientVM/main.tf) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_cluster_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/cluster/main.tf) - Deploys the EKS cluster where all the Azure Arc data services will be deployed.
-  - [_workers_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/workers/main.tf) - Deploys the EKS cluster's worker nodes.
-
-- User remotes into the client Windows VM, which automatically kicks off the DataServicesLogonScript PowerShell script that deploys and configures Azure Arc-enabled data services on the EKS cluster, including the data controller and SQL Managed Instance.
-
-- In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, the Terraform plan will deploy an EKS cluster, the Azure Arc Data Controller and the SQL Managed Instance on that cluster and an EC2 Windows Server 2022 Client instance.
-
-- Before running the Terraform plan, create a terraform.tfvars file in the root of the terraform folder and supply some values for your environment.
-
- ```HCL
- AWS_ACCESS_KEY_ID = "ZFTIFC443FTFDEZ5TKNR"
- AWS_SECRET_ACCESS_KEY = "fakeSecretValue1dfd343sd5712adfddjh"
- AWS_DEFAULT_REGION = "us-west-1"
- azureLocation = "eastus"
- spnClientId = "1414133c-9786-53a4-b231-f87c143ebdb1"
- spnClientSecret = "fakeSecretValue123458125712ahjeacjh"
- spnTenantId = "33572583-d294-5b56-c4e6-dcf9a297ec17"
- subscriptionId = "33987583-A984-5C87-T4e3-POf7a397ec17"
- resourceGroup = "Arc-Data-Demo"
- workspaceName = "la-arc-001"
- deploySQLMI = true
- SQLMIHA = false
- deployPostgreSQL = false
- customLocationObjectId = "649cb28f-bc13-492a-9470-c8bf01fa8eeb"
- ```
-
-- Variable reference:
-
- - **_`AWS_ACCESS_KEY_ID`_** - Your AWS access key.
- - **_`AWS_SECRET_ACCESS_KEY`_** - Your AWS secret access key.
- - **_`AWS_DEFAULT_REGION`_** - AWS location code (e.g. 'us-west-1', 'us-east-2', etc.).
- - **_`azureLocation`_** - Azure location code (e.g. 'eastus', 'westus2', etc.).
- - **_`spnClientId`_** - Your Azure service principal id.
- - **_`spnClientSecret`_** - Your Azure service principal secret.
- - **_`spnTenantId`_** - Your Azure tenant id.
- - **_`subscriptionId`_** - Your Azure subscription Id.
- - **_`resourceGroup`_** - Resource group which will contain all of the Azure Arc data services resources.
-  - **_`workspaceName`_** - Unique name for the Log Analytics workspace.
-  - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this SQL Managed Instance scenario, we leave it set to _**true**_.
-  - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. For this scenario, we leave it set to _**false**_.
-  - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this scenario, we leave it set to _**false**_.
-  - **_`customLocationObjectId`_** - The object ID of the Azure AD application used by the Azure Arc service, retrieved in the prerequisites section.
-
-> **NOTE: Any variables in bold are required. If any optional parameters are not provided, defaults will be used.**
-
-> **Note:** In case you decided to deploy SQL Managed Instance in a highly available fashion, refer to the ["Perform database failover with SQL Managed Instance Availability Groups"](../../day2/aks/aks_mssql_ha/_index.md) Jumpstart scenario as well as the ["High Availability with Azure Arc-enabled SQL Managed Instance"](https://learn.microsoft.com/azure/azure-arc/data/managed-instance-high-availability) product documentation.
-
-- Now you will deploy the Terraform file. Navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform) and run the commands below:
-
- ```shell
- terraform init
- terraform plan -out=infra.out
- terraform apply "infra.out"
- ```
-
-> **NOTE: The deployment time for this scenario can take ~20-35min**
-
-- Example output from `terraform init`:
-
- ![Screenshot showing creating the terraform init command output](./14.png)
-
-- Example output from `terraform plan -out=infra.out`:
-
- ![Screenshot showing creating the terraform plan command output](./15.png)
-
-- Once completed, the plan will output a decrypted password for your Windows Client instance that you will use to RDP into it. Before connecting to the Client instance, you can review the EKS cluster and the EC2 instances created. Notice how four instances were created: three EKS nodes and the Client instance.
-
- ![Screenshot showing creating the terraform apply command output](./16.png)
-
- ![Screenshot showing creating the new EKS cluster](./17.png)
-
- ![Screenshot showing creating the new EKS cluster](./18.png)
-
- ![Screenshot showing creating the new EC2 instances](./19.png)
-
- ![Screenshot showing creating the new EC2 instances](./20.png)
-
- ![Screenshot showing creating the new EC2 instances](./21.png)
-
-## Windows Login & Post Deployment
-
-- Now that the first phase of the automation is completed, it is time to RDP to the client VM. Select the Windows instance, click *"Connect"* and download the Remote Desktop file.
-
- ![Screenshot showing starting an RDP session to the Client instance](./22.png)
-
- ![Screenshot showing starting an RDP session to the Client instance](./23.png)
-
-- Using the decrypted password generated from the plan output, RDP to the Windows instance. In case you need to get the password later, use the ```terraform output``` command to display the plan output again.
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster and ready to use.
-
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the PowerShell logon script run](./33.png)
-
- ![Screenshot showing the PowerShell logon script run](./34.png)
-
- ![Screenshot showing the PowerShell logon script run](./35.png)
-
- ![Screenshot showing the post-run desktop](./36.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **5 Azure resources deployed**).
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- - _Azure Arc-enabled SQL Managed Instance_ - The SQL Managed Instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./37.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./38.png)
-
- ![Screenshot showing Azure Data Studio extensions](./39.png)
-
-- Additionally, the SQL Managed Instance connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio SQL MI connection](./40.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./41.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./42.png)
-
-## High Availability with SQL Always-On availability groups
-
-Azure Arc-enabled SQL Managed Instance is deployed on Kubernetes as a containerized application and uses Kubernetes constructs such as stateful sets and persistent storage to provide built-in health monitoring, failure detection, and failover mechanisms to maintain service health. For increased reliability, you can also configure Azure Arc-enabled SQL Managed Instance to deploy with extra replicas in a high availability configuration.
-
-For showcasing and testing SQL Managed Instance with [Always On availability groups](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-high-availability#deploy-with-always-on-availability-groups), a dedicated [Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/) is available to help you simulate failures and get hands-on experience with this deployment model.
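-
-As a quick way to see these Kubernetes constructs on the cluster, a sketch (assuming the default _arc_ namespace used in this scenario):
-
-```shell
-# View the stateful sets and pods backing the SQL Managed Instance
-kubectl get statefulsets -n arc
-kubectl get pods -n arc
-```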
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance stress simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ will allow you to generate load on the Azure Arc-enabled SQL Managed Instance that can be used to showcase how the SQL database and services are performing, as well as to highlight the operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you, alongside the username and password you used to deploy the environment.
-
- ![Screenshot showing opened SqlQueryStress](./43.png)
-
- ![Screenshot showing SQLMI Endpoints text file](./44.png)
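-
-  As a quick connectivity check from the Client VM, a hypothetical _sqlcmd_ example; replace the endpoint, username, and password with the values from the _SQLMI Endpoints_ text file:
-
-  ```shell
-  sqlcmd -S <primary-endpoint-ip>,<port> -U <username> -P <password> -Q "SELECT @@VERSION;"
-  ```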
-
-> **NOTE: The secondary SQL Managed Instance endpoint will be available only when using the [HA deployment model ("Business Critical")](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/cluster_api/capi_azure/capi_mssql_ha/).**
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![Screenshot showing SqlQueryStress connected](./45.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the procedure below and change the number of iterations you want it to run, as well as the number of threads, to generate even more load on the database. In addition, change the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads, and a 1ms delay between queries. These settings should keep the stress test running for a while.
-
- ![Screenshot showing SqlQueryStress settings](./46.png)
-
- ![Screenshot showing SqlQueryStress running](./47.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure and SQL Managed Instance monitoring (PostgreSQL dashboards are included as well, but we will not be covering them in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is shown in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Screenshot showing Grafana desktop shortcut](./48.png)
-
-- [Optional] The IP address for this instance represents the Kubernetes _LoadBalancer_ external IP that was provisioned as part of Azure Arc-enabled data services. Use the _`kubectl get svc -n arc`_ command to view the _metricsui_ external service IP address.
-
- ![Screenshot showing metricsui Kubernetes service](./49.png)
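-
-  A sketch for retrieving that address directly, assuming the metrics UI service is named _metricsui-external-svc_ (the name may differ by release; on EKS the `LoadBalancer` ingress exposes a DNS hostname rather than an IP):
-
-  ```shell
-  kubectl get svc -n arc metricsui-external-svc \
-    -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
-  ```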
-
-- To log in, use the same username and password that are in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Screenshot showing Grafana username and password](./50.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Screenshot showing Grafana dashboards](./51.png)
-
- ![Screenshot showing Grafana "SQL Managed Instance Metrics" dashboard](./52.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _`SqlQueryStress`_ (in case it was already finished).
-
- ![Screenshot showing "Last 5 minutes" time range](./53.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Screenshot showing increased load activity](./54.png)
-
- ![Screenshot showing increased load activity](./55.png)
-
-## Delete the deployment
-
-- If you want to delete the entire Azure environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./56.png)
-
-- If you want to delete the entire environment, use the _`terraform destroy`_ command to delete all of the AWS resources.
-
- ```shell
- terraform destroy --auto-approve
- ```
-
- ![Screenshot showing the deletion of all AWS resources](./57.png)
-
-  > **NOTE: Because EKS creates internal AWS dependencies that Terraform has no knowledge of from our plan, we need to delete those resources from the AWS console while `terraform destroy` is cleaning up. This avoids dependency conflicts and ongoing billing from orphaned resources such as EKS volumes.**
-
-- While the `destroy` command is running, delete any new Load Balancers created as EKS Services (`EC2 > Load Balancing > Load Balancers`) that are deployed in AWS from the Console:
-
- ![Screenshot showing the Deletion of Load Balancers](./58.png)
-
-- While the `destroy` command is running, delete any new Elastic Block Stores, created as EKS Persistent Volumes (`EC2 > Elastic Block Store > Volumes`) that are deployed in AWS from the Console:
-
- ![Screenshot showing the Deletion of Elastic Block Stores](./59.png)
\ No newline at end of file
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_postgres_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_postgres_terraform/_index.md
deleted file mode 100644
index 7c88147b76..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/eks/eks_postgres_terraform/_index.md
+++ /dev/null
@@ -1,329 +0,0 @@
----
-type: docs
-title: "PostgreSQL Terraform Plan"
-linkTitle: "PostgreSQL Terraform Plan"
-weight: 3
-description: >
----
-
-## Deploy Azure PostgreSQL in directly connected mode on EKS using Terraform
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [PostgreSQL](https://docs.microsoft.com/azure/azure-arc/data/what-is-azure-arc-enabled-postgres-hyperscale) deployed on an [Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/) cluster using [Terraform](https://www.terraform.io/).
-
-By the end of this scenario, you will have an EKS cluster deployed with an Azure Arc Data Controller, PostgreSQL instance, and a Microsoft Windows Server 2022 (Datacenter) AWS EC2 client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Install](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) and [Configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) AWS CLI.
-
-- [Create a free Amazon Web Services account](https://aws.amazon.com/free/) if you don't already have one.
-
-- [Install Terraform >=1.0](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned multiple Role-based access control (RBAC) roles is required:
-
-  - "Contributor" - Required for provisioning Azure resources
-  - "Security admin" - Required for installing the Cloud Defender Azure Arc-enabled Kubernetes extension and dismissing alerts
-  - "Security reader" - Required for viewing Azure Arc-enabled Kubernetes Cloud Defender extension findings
-  - "Monitoring Metrics Publisher" - Required for Azure Arc-enabled data services billing, monitoring metrics, and logs management
-
-  To create it, log in to your Azure account and run the command below (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security admin" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Security reader" --scopes /subscriptions/$subscriptionId
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Monitoring Metrics Publisher" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind, adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Follow the steps [here](https://docs.microsoft.com/azure/azure-arc/kubernetes/custom-locations#enable-custom-locations-on-cluster) or run the command below to retrieve your AAD tenant-specific ObjectId for the "Custom Locations RP" Enterprise Application, needed to onboard Custom Locations on EKS:
-
- ```shell
- # Note that the APPLICATION ID: bc313c14-388c-4e7d-a58e-70017303ee3b is constant across all tenants
- az ad sp show --id 'bc313c14-388c-4e7d-a58e-70017303ee3b' --query id -o tsv
- ```
-
-- Create a resource group
-
- ```shell
- az group create --name "Arc-Data-Demo" --location "eastus"
- ```
-
-### Create a new AWS IAM Role & Key
-
-Create an AWS user IAM key. An access key grants programmatic access to your resources, which we will use later in this scenario.
-
-- Navigate to the [IAM Access page](https://console.aws.amazon.com/iam/home#/home).
-
- ![Screenshot showing creating an AWS IAM Role & Key](./01.png)
-
-- Select **Users** from the side menu.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./02.png)
-
-- Select the **User** you want to create the access key for.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./03.png)
-
-- Select the **Security credentials** tab for the selected **User**.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./04.png)
-
-- Under **Access Keys** select **Create Access Keys**.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./05.png)
-
-- The popup window will show you the ***Access key ID*** and ***Secret access key***. Save both of these values to configure the **Terraform plan** variables later.
-
- ![Screenshot showing creating an AWS IAM Role & Key](./06.png)
-
-- In order to open an RDP session to the Windows Client EC2 instance, an EC2 Key Pair is required. From the *Services* menu, click on *"EC2"*, enter the *Key Pairs* settings from the left sidebar (under the *Network & Security* section) and click on *"Create key pair"* (top-right corner) to create a new key pair.
-
- ![Screenshot showing creating an EC2 Key Pair](./07.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./08.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./09.png)
-
-- Provide a meaningful name, for example *terraform*, and click on *"Create key pair"*, which will then automatically download the created *pem* file.
-
- ![Screenshot showing creating an EC2 Key Pair](./10.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./11.png)
-
- ![Screenshot showing creating an EC2 Key Pair](./12.png)
-
-- Copy the downloaded *pem* file to where the terraform binaries are located (in your cloned repository directory).
-
- ![Screenshot showing creating an EC2 Key Pair](./13.png)
-
- > **NOTE: EC2 Key Pairs are regional.**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the Terraform runtime environment variables in the _terraform.tfvars_ file (one-time edit). These variables are used throughout the deployment.
-
-- The [main Terraform plan](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/main.tf) will initiate the deployment of the other modules:
-
- - [_clientVM_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/clientVM/main.tf) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_cluster_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/cluster/main.tf) - Deploys the EKS cluster where all the Azure Arc data services will be deployed.
-  - [_workers_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/modules/workers/main.tf) - Deploys the EKS cluster's worker nodes.
-
-- User remotes into the client Windows VM, which automatically kicks off the DataServicesLogonScript PowerShell script that deploys and configures Azure Arc-enabled data services on the EKS cluster, including the data controller and PostgreSQL.
-
-- In addition to deploying the data controller and PostgreSQL, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, the Terraform plan will deploy an EKS cluster, the Azure Arc Data Controller and the PostgreSQL Instance on that cluster and an EC2 Windows Server 2022 Client instance.
-
-- Before running the Terraform plan, create a terraform.tfvars file in the root of the terraform folder and supply some values for your environment.
-
- ```HCL
- AWS_ACCESS_KEY_ID = "ZFTIFC443FTFDEZ5TKNR"
- AWS_SECRET_ACCESS_KEY = "fakeSecretValue1dfd343sd5712adfddjh"
- AWS_DEFAULT_REGION = "us-west-1"
- azureLocation = "eastus"
- spnClientId = "1414133c-9786-53a4-b231-f87c143ebdb1"
- spnClientSecret = "fakeSecretValue123458125712ahjeacjh"
- spnTenantId = "33572583-d294-5b56-c4e6-dcf9a297ec17"
- subscriptionId = "33987583-A984-5C87-T4e3-POf7a397ec17"
- resourceGroup = "Arc-Data-Demo"
- workspaceName = "la-arc-001"
- deploySQLMI = false
- SQLMIHA = false
- deployPostgreSQL = true
- customLocationObjectId = "649cb28f-bc13-492a-9470-c8bf01fa8eeb"
- ```
-
-- Variable reference:
-
- - **_`AWS_ACCESS_KEY_ID`_** - Your AWS access key.
- - **_`AWS_SECRET_ACCESS_KEY`_** - Your AWS secret access key.
- - **_`AWS_DEFAULT_REGION`_** - AWS location code (e.g. 'us-west-1', 'us-east-2', etc.).
- - **_`azureLocation`_** - Azure location code (e.g. 'eastus', 'westus2', etc.).
- - **_`spnClientId`_** - Your Azure service principal id.
- - **_`spnClientSecret`_** - Your Azure service principal secret.
- - **_`spnTenantId`_** - Your Azure tenant id.
- - **_`subscriptionId`_** - Your Azure subscription Id.
- - **_`resourceGroup`_** - Resource group which will contain all of the Azure Arc data services resources.
-  - **_`workspaceName`_** - Unique name for the Log Analytics workspace.
-  - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this scenario, we leave it set to _**false**_.
-  - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations. For this scenario, we leave it set to _**false**_.
-  - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this PostgreSQL scenario, we leave it set to _**true**_.
-  - **_`customLocationObjectId`_** - The object ID of the Azure AD application used by the Azure Arc service, retrieved in the prerequisites section.
-
-> **NOTE: Any variables in bold are required. If any optional parameters are not provided, defaults will be used.**
-
-- Now you will deploy the Terraform file. Navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform) and run the commands below:
-
- ```shell
- terraform init
- terraform plan -out=infra.out
- terraform apply "infra.out"
- ```
-
-> **NOTE: The deployment time for this scenario can take ~20-35min**
-
-- Example output from `terraform init`:
-
- ![Screenshot showing creating the terraform init command output](./14.png)
-
-- Example output from `terraform plan -out=infra.out`:
-
- ![Screenshot showing creating the terraform plan command output](./15.png)
-
-- Once completed, the plan will output a decrypted password for your Windows Client instance that you will use to RDP into it. Before connecting to the Client instance, you can review the EKS cluster and the EC2 instances created. Notice how four instances were created: three EKS nodes and the Client instance.
-
- ![Screenshot showing creating the terraform apply command output](./16.png)
-
- ![Screenshot showing creating the new EKS cluster](./17.png)
-
- ![Screenshot showing creating the new EKS cluster](./18.png)
-
- ![Screenshot showing creating the new EC2 instances](./19.png)
-
- ![Screenshot showing creating the new EC2 instances](./20.png)
-
- ![Screenshot showing creating the new EC2 instances](./21.png)
-
-## Windows Login & Post Deployment
-
-- Now that the first phase of the automation is completed, it is time to RDP to the client VM. Select the Windows instance, click *"Connect"* and download the Remote Desktop file.
-
- ![Screenshot showing starting an RDP session to the Client instance](./22.png)
-
- ![Screenshot showing starting an RDP session to the Client instance](./23.png)
-
-- Using the decrypted password generated from the plan output, RDP to the Windows instance. In case you need to get the password later, use the ```terraform output``` command to display the plan output again.
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/eks/terraform/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster and ready to use.
-
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the PowerShell logon script run](./26.png)
-
- ![Screenshot showing the PowerShell logon script run](./27.png)
-
- ![Screenshot showing the PowerShell logon script run](./28.png)
-
- ![Screenshot showing the PowerShell logon script run](./29.png)
-
- ![Screenshot showing the PowerShell logon script run](./30.png)
-
- ![Screenshot showing the PowerShell logon script run](./31.png)
-
- ![Screenshot showing the PowerShell logon script run](./32.png)
-
- ![Screenshot showing the PowerShell logon script run](./33.png)
-
- ![Screenshot showing the PowerShell logon script run](./34.png)
-
- ![Screenshot showing the post-run desktop](./35.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and PostgreSQL Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **5 Azure resources deployed**).
-
-  - _Azure Arc-enabled Kubernetes cluster_ - Azure Arc-enabled data services deployed in directly connected mode use this type of resource in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - _Custom location_ - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - _Azure Arc Data Controller_ - The data controller that is now deployed on the Kubernetes cluster.
-
- - _Azure Arc-enabled PostgreSQL_ - The PostgreSQL instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./36.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./37.png)
-
- ![Screenshot showing Azure Data Studio extensions](./38.png)
-
-- Additionally, the PostgreSQL connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
-  ![Screenshot showing Azure Data Studio PostgreSQL connection](./39.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-In order to view these cluster extensions, click on the Extensions settings of the Azure Arc-enabled Kubernetes resource.
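-
-Alternatively, the same extensions can be listed with the Azure CLI. A minimal sketch (the cluster and resource group names are placeholders for the values used in your deployment):
-
-  ```shell
-  # List the cluster extensions installed on the connected cluster
-  az k8s-extension list \
-    --cluster-type connectedClusters \
-    --cluster-name <your-arc-connected-cluster-name> \
-    --resource-group <your-resource-group>
-  ```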
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./40.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./41.png)
-
-## Delete the deployment
-
-- If you want to delete the entire Azure environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./42.png)
-
-- If you want to delete the entire environment, use the _`terraform destroy`_ command to delete all of the AWS resources.
-
- ```shell
- terraform destroy --auto-approve
- ```
-
- ![Screenshot showing the deletion of all AWS resources](./43.png)
-
-  > **NOTE: EKS creates internal AWS dependencies that Terraform has no knowledge of from our plan. While `terraform destroy` is cleaning up, delete the following resources from the AWS console to avoid dependency conflicts and ongoing billing from orphaned resources such as EKS volumes.**
-
-- While the `destroy` command is running, delete any new Load Balancers created as EKS Services (`EC2 > Load Balancing > Load Balancers`) that are deployed in AWS from the Console:
-
- ![Screenshot showing the Deletion of Load Balancers](./44.png)
-
-- While the `destroy` command is running, delete any new Elastic Block Stores, created as EKS Persistent Volumes (`EC2 > Elastic Block Store > Volumes`) that are deployed in AWS from the Console:
-
- ![Screenshot showing the Deletion of Elastic Block Stores](./45.png)
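-
-- If you prefer the AWS CLI over the console, a minimal sketch for locating these orphaned resources (the filters are illustrative; adjust them to your cluster's tags):
-
-  ```shell
-  # List load balancers that may have been created by EKS Services
-  aws elb describe-load-balancers --query "LoadBalancerDescriptions[].LoadBalancerName"
-
-  # List EBS volumes left in the "available" state after the cluster is gone
-  aws ec2 describe-volumes --filters Name=status,Values=available --query "Volumes[].VolumeId"
-  ```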
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/gke/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/gke/_index.md
deleted file mode 100644
index 6e83462da6..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/gke/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Google Kubernetes Engine"
-linkTitle: "Google Kubernetes Engine"
-weight: 5
-description: >-
-  If you are working in a multi-cloud environment, the scenario in this section will guide you through creating a Google Kubernetes Engine (GKE) cluster with Azure Arc-enabled data services integration in an automated fashion using Terraform.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_dc_vanilla_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_dc_vanilla_terraform/_index.md
deleted file mode 100644
index 046885ca51..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_dc_vanilla_terraform/_index.md
+++ /dev/null
@@ -1,375 +0,0 @@
----
-type: docs
-title: "Data Controller Terraform plan"
-linkTitle: "Data Controller Terraform plan"
-weight: 1
-description: >
----
-
-## Deploy an Azure Arc Data Controller (Vanilla) on GKE using Terraform
-
-The following scenario will guide you on how to deploy a "Ready to Go" environment so you can deploy Azure Arc Data Services on a [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) cluster using [Terraform](https://www.terraform.io/).
-
-By the end of this scenario, you will have a GKE cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) GKE compute instance VM installed and pre-configured with all the required tools needed to work with Azure Arc Data Services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Deployment Process Overview
-
-- Create a Google Cloud Platform (GCP) project, IAM Role & Service Account
-- Download credentials file
-- Clone the Azure Arc Jumpstart repository
-- Create the .tfvars file with your variables values
-- Export the *TF_VAR_CL_OID* variable
-- *terraform init*
-- *terraform apply*
-- User remotes into client Windows VM, which automatically kicks off the [DataServicesLogonScript](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/gke/terraform/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the GKE cluster.
-- *kubectl delete namespace arc*
-- *terraform destroy*
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 or higher](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- Google Cloud account with billing enabled - [Create a free trial account](https://cloud.google.com/free). To create Windows Server virtual machines, you must upgrade your account to enable billing. Click Billing from the menu and then select Upgrade in the lower right.
-
- ![Screenshot showing how to enable billing on GCP account](./01.png)
-
- ![Screenshot showing how to enable billing on GCP account](./02.png)
-
- ![Screenshot showing how to enable billing on GCP account](./03.png)
-
- ***Disclaimer*** - **To prevent unexpected charges, please follow the "Delete the deployment" section at the end of this README**
-
-- [Install Terraform 1.0 or higher](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal with Role-based access control (RBAC) is required:
-
-  - "Owner" - Required for provisioning Azure resources, interacting with Azure Arc-enabled data services billing, monitoring metrics, and logs management, and creating the role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- SP_OID=$(az ad sp show --id $SP_CLIENT_ID --query id -o tsv)
-
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind as possible while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Create a new GCP Project, IAM Role & Service Account. In order to deploy resources in GCP, we will create a new GCP Project as well as a service account to allow Terraform to authenticate against GCP APIs and run the plan to deploy resources.
-
-  - Browse to the Google Cloud console and log in with your Google Cloud account. Once logged in, click on Select a project.
-
- ![GCP new project](./04.png)
-
- - [Create a new project](https://cloud.google.com/resource-manager/docs/creating-managing-projects) named "Azure Arc Demo".
-
- ![GCP new project](./05.png)
-
- ![GCP new project](./06.png)
-
-  - After creating it, be sure to copy down the project id, as it is usually different from the project name.
-
- ![GCP new project](./07.png)
-
- - Search Compute Engine API for the project
-
- ![Enable Compute Engine API](./08.png)
-
- - Enable Compute Engine API for the project
-
- ![Enable Compute Engine API](./09.png)
-
- - Create credentials for your project
-
- ![Add credentials](./10.png)
-
-  - Create project Owner service account credentials, download the private key JSON file, and copy the file to the directory where the Terraform files are located. Rename the JSON file (for example, *account.json*). The Terraform plan will be using the credentials stored in this file to authenticate against your GCP project.
-
- ![Add credentials](./11.png)
-
- ![Add credentials](./12.png)
-
- ![Add credentials](./13.png)
-
- ![Add credentials](./14.png)
-
- ![Create private key](./15.png)
-
- ![Create private key](./16.png)
-
- ![Create private key](./17.png)
-
- ![Create private key](./18.png)
-
- ![account.json](./19.png)
-
- - Search Kubernetes Engine API for the project
-
- ![Enable the Kubernetes Engine API](./20.png)
-
- - Enable Kubernetes Engine API for the project
-
- ![Enable the Kubernetes Engine API](./21.png)
-
-## Automation Flow
-
-Read the below explanation to get familiar with the automation and deployment flow.
-
-- User creates the terraform variables file (_terraform.tfvars_) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) OID variable. The variable values are used throughout the deployment.
-
-- User deploys the Terraform plan which will deploy the GKE cluster and the GCP compute instance VM as well as an Azure resource group. The Azure resource group is required to host the Azure Arc services such as the Azure Arc-enabled Kubernetes cluster, the custom location, the Azure Arc data controller, and any database services you deploy on top of the data controller.
-
- > **NOTE: Depending on the GCP region, make sure you do not have any [SSD quota limit in the region](https://cloud.google.com/compute/quotas), otherwise, the Azure Arc Data Controller kubernetes resources will fail to deploy.**
-
-- As part of the Windows Server 2022 VM deployment, there are 4 script executions:
-
-  1. *azure_arc.ps1* script will be created automatically as part of the Terraform plan runtime and is responsible for injecting the terraform variable values onto the Windows instance, which will then be used in both the *ClientTools* and the *LogonScript* scripts.
-
-  2. *password_reset.ps1* script will be created automatically as part of the Terraform plan runtime and is responsible for creating the Windows username & password.
-
-  3. *Bootstrap.ps1* script will run at the Terraform plan runtime and will:
-      - Create the *Bootstrap.log* file
-      - Install the required tools – az cli, PowerShell module, kubernetes-cli, Visual C++ Redistributable, HELM, VS Code, etc. (Chocolatey packages)
-      - Download Azure Data Studio & Azure Data CLI
-      - Disable Windows Server Manager, remove Internet Explorer, disable Windows Firewall
-      - Download the DataServicesLogonScript.ps1 PowerShell script
-      - Create the Windows scheduled task to run the DataServicesLogonScript at first login (see the sketch after this list)
-
-  4. *DataServicesLogonScript.ps1* script will run on user first logon to Windows and will:
-      - Create the *DataServicesLogonScript.log* file
-      - Install the Azure Data Studio extensions: Azure Data CLI, Azure Arc & PostgreSQL
-      - Create the Azure Data Studio desktop shortcut
-      - Use Azure CLI to connect the GKE cluster to Azure as an Azure Arc-enabled Kubernetes cluster
-      - Create a custom location for use with the Azure Arc-enabled Kubernetes cluster
-      - Deploy an ARM template that will deploy the Azure Arc data controller on the GKE cluster
-      - Open another PowerShell session which will execute a command to watch the deployed Azure Arc Data Controller Kubernetes pods
-      - Unregister the logon script Windows scheduled task so it will not run after first login
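-
-  As a reference, a minimal PowerShell sketch of how such a first-logon scheduled task can be registered (the task name and script path are illustrative, not taken from the actual Bootstrap.ps1):
-
-  ```powershell
-  # Run the logon script when a user logs on, with elevated rights
-  $action  = New-ScheduledTaskAction -Execute "powershell.exe" -Argument "-File C:\Temp\DataServicesLogonScript.ps1"
-  $trigger = New-ScheduledTaskTrigger -AtLogOn
-  Register-ScheduledTask -TaskName "DataServicesLogonScript" -Action $action -Trigger $trigger -RunLevel Highest
-  ```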
-
-## Terraform variables
-
-- Before running the Terraform plan, create the _terraform.tfvars_ file in the root of the terraform folder and supply some values for your environment.
-
- ```HCL
- gcp_project_id = "azure-arc-demo-277620"
- gcp_credentials_filename = "account.json"
- gcp_region = "us-west1"
- gcp_zone = "us-west1-a"
- gke_cluster_name = "arc-data-gke"
- admin_username = "arcdemo"
- admin_password = "ArcDemo1234567!!"
- windows_username = "arcdemo"
- windows_password = "Passw0rd123!!"
- SPN_CLIENT_ID = "33333333-XXXX-YYYY-XXXX-YTYTYTYT"
- SPN_CLIENT_SECRET = "33333333-XXXX-YTYT-9c21-7777777777"
- SPN_TENANT_ID = "33333333-XXXX-41af-1111-7777777777"
- SPN_AUTHORITY = "https://login.microsoftonline.com"
- AZDATA_USERNAME = "arcdemo"
- AZDATA_PASSWORD = "Arcdemo123!!"
- ARC_DC_NAME = "arcdatactrl"
- ARC_DC_SUBSCRIPTION = "32323232-XXXXX-YYYYY-9e8f-88888888888"
- ARC_DC_RG = "Arc-Data-GKE-Demo"
- ARC_DC_REGION = "eastus"
- deploy_SQLMI = false
- SQLMIHA = false
- deploy_PostgreSQL = false
- templateBaseUrl = "https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/gke/terraform/"
- MY_IP = "192.168.10.10"
- ```
-
-- Variable reference:
-
- - **_`gcp_project_id`_** - Your GCP Project ID (Created in the prerequisites section)
- - **_`gcp_credentials_filename`_** - Your GCP Credentials JSON filename (Created in the prerequisites section)
- - **_`gcp_region`_** - GCP region where resource will be created
- - **_`gcp_zone`_** - GCP zone where resource will be created
- - **_`gke_cluster_name`_** - GKE cluster name
- - **_`admin_username`_** - GKE cluster administrator username
- - **_`admin_password`_** - GKE cluster administrator password
- - **_`windows_username`_** - Windows Server Client compute instance VM administrator username
-  - **_`windows_password`_** - Windows Server Client compute instance VM administrator password (The password must be at least 8 characters long, contain characters from three of the following four sets: uppercase letters, lowercase letters, numbers, and symbols, and must **not contain** the user's account name or parts of the user's full name that exceed two consecutive characters)
- - **_`SPN_CLIENT_ID`_** - Your Azure service principal name
- - **_`SPN_CLIENT_SECRET`_** - Your Azure service principal password
- - **_`SPN_TENANT_ID`_** - Your Azure tenant ID
- - **_`SPN_AUTHORITY`_** - _https://login.microsoftonline.com_ **Do not change**
- - **_`AZDATA_USERNAME`_** - Azure Arc Data Controller admin username
- - **_`AZDATA_PASSWORD`_** - Azure Arc Data Controller admin password (The password must be at least 8 characters long and contain characters from the following four sets: uppercase letters, lowercase letters, numbers, and symbols)
-  - **_`ARC_DC_NAME`_** - Azure Arc Data Controller name (The name must consist of lowercase alphanumeric characters or '-', and must start and end with an alphanumeric character. This name will be used for the k8s namespace as well)
- - **_`ARC_DC_SUBSCRIPTION`_** - Azure Arc Data Controller Azure subscription ID
- - **_`ARC_DC_RG`_** - Azure resource group where all future Azure Arc resources will be deployed
-  - **_`ARC_DC_REGION`_** - Azure location where the Azure Arc Data Controller resource will be created in Azure (Currently, the supported regions are eastus, eastus2, centralus, westus2, westeurope, southeastasia)
- - **_`deploy_SQLMI`_** - Boolean that sets whether or not to deploy SQL Managed Instance, for this data controller only scenario we leave it set to false
- - **_`SQLMIHA`_** - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, for this data controller vanilla scenario we leave it set to false
- - **_`deploy_PostgreSQL`_** - Boolean that sets whether or not to deploy PostgreSQL, for this data controller only scenario we leave it set to false
- - **_`templateBaseUrl`_** - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well - e.g. `https://raw.githubusercontent.com/your--github--account/azure_arc/your--branch/azure_arc_data_jumpstart/gke/terraform/`
- - **_`MY_IP`_** - Your Client IP
-
-### Azure Custom Location Resource Provider (RP) and the Object ID (OID) environment variable
-
-- You will also need to get the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) OID and export it as an environment variable:
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- export TF_VAR_CL_OID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $Env:TF_VAR_CL_OID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-## Deployment
-
-> **NOTE: The GKE cluster will use 3 nodes of SKU "n1-standard-8".**
-
-As mentioned, the Terraform plan and automation scripts will deploy a GKE cluster, the Azure Arc Data Controller on that cluster and a Windows Server 2022 Client GCP compute instance.
-
-- Navigate to the folder containing the Terraform plan files.
-
- ```shell
- cd azure_arc_data_jumpstart/gke/terraform/
- ```
-
-- Run the ```terraform init``` command which is used to initialize a working directory containing Terraform configuration files and load the required Terraform providers.
-
- ![terraform init](./22.png)
-
-- Run the ```terraform plan -out=infra.out``` command to make sure everything is configured properly.
-
- ![terraform plan](./23.png)
-
-- Run the ```terraform apply "infra.out"``` command and wait for the plan to finish. **Runtime for deploying all the GCP resources for this plan is ~20-30min.**
-
- ![terraform apply completed](./24.png)
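-
-- For reference, the full command sequence from the terraform folder is:
-
-  ```shell
-  terraform init
-  terraform plan -out=infra.out
-  terraform apply "infra.out"
-  ```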
-
-- Once completed, you can review the GKE cluster and the worker nodes resources as well as the GCP compute instance VM created.
-
- ![GKE cluster](./25.png)
-
- ![GKE cluster](./26.png)
-
- ![GCP VM instances](./27.png)
-
- ![GCP VM instances](./28.png)
-
-- In the Azure Portal, a new empty Azure resource group was created which will be used for Azure Arc Data Controller and the other data services you will be deploying in the future.
-
- ![New empty Azure resource group](./29.png)
-
-## Windows Login & Post Deployment
-
-Now that we have both the GKE cluster and the Windows Server Client instance created, it is time to login to the Client VM.
-
-- Select the Windows instance, click on the RDP dropdown and download the RDP file. Using your *windows_username* and *windows_password* credentials, log in to the VM.
-
- ![GCP Client VM RDP](./30.png)
-
- ![GCP Client VM RDP](./31.png)
-
-- At first login, as mentioned in the "Automation Flow" section, the DataServicesLogonScript.ps1 will get executed. This script was created as part of the automated deployment process.
-
-  Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. You will notice that the Azure Arc Data Controller gets deployed on the GKE cluster. **The logon script run time is approximately one hour**.
-
-  Once the script finishes, the logon script PowerShell session will be closed and the Azure Arc Data Controller will be deployed on the GKE cluster, ready to use.
-
- ![PowerShell login script run](./32.png)
-
- ![PowerShell login script run](./33.png)
-
- ![PowerShell login script run](./34.png)
-
- ![PowerShell login script run](./35.png)
-
- ![PowerShell login script run](./36.png)
-
- ![PowerShell login script run](./37.png)
-
- ![PowerShell login script run](./38.png)
-
- ![PowerShell login script run](./39.png)
-
- ![PowerShell login script run](./40.png)
-
- ![PowerShell login script run](./41.png)
-
- ![PowerShell login script run](./42.png)
-
- ![PowerShell login script run](./43.png)
-
- ![PowerShell login script run](./44.png)
-
- ![PowerShell login script run](./45.png)
-
- ![PowerShell login script run](./46.png)
-
-- When the scripts are complete, all PowerShell windows will close.
-
- ![PowerShell login script run](./47.png)
-
-- From Azure Portal, navigate to the resource group and confirm that the Azure Arc-enabled Kubernetes cluster, the Azure Arc data controller resource and the Custom Location resource are present.
-
- ![Azure Portal showing data controller resource](./48.png)
-
-- Another tool automatically deployed is Azure Data Studio along with the *Azure Data CLI*, the *Azure Arc* and the *PostgreSQL* extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Azure Data Studio shortcut](./49.png)
-
- ![Azure Data Studio extension](./50.png)
-
-## Delete the deployment
-
-To completely delete the environment, follow the below steps.
-
-- Delete the data services resources by using kubectl. Run the below command from a PowerShell window on the client VM.
-
- ```shell
- kubectl delete namespace arc
- ```
-
- ![Delete database resources](./51.png)
-
-- Use terraform to delete all of the GCP resources as well as the Azure resource group. **The *terraform destroy* run time is approximately 5-6 minutes**.
-
- ```shell
- terraform destroy --auto-approve
- ```
-
- ![terraform destroy](./52.png)
-
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_mssql_mi_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_mssql_mi_terraform/_index.md
deleted file mode 100644
index c50d7c29b7..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_mssql_mi_terraform/_index.md
+++ /dev/null
@@ -1,443 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance Terraform Plan"
-linkTitle: "SQL Managed Instance Terraform Plan"
-weight: 2
-description: >
----
-
-## Deploy an Azure Arc-enabled SQL Managed Instance on GKE using a Terraform plan
-
-The following scenario will guide you on how to deploy a "Ready to Go" environment so you can deploy Azure Arc-enabled data services on a [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) cluster using [Terraform](https://www.terraform.io/).
-
-By the end of this scenario, you will have a GKE cluster deployed with an Azure Arc Data Controller ([in "Directly Connected" mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity)), Azure SQL Managed Instance with a sample database and a Microsoft Windows Server 2022 (Datacenter) GKE compute instance VM installed and pre-configured with all the required tools needed to work with Azure Arc data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Deployment Process Overview
-
-- Create a Google Cloud Platform (GCP) project, IAM Role & Service Account
-- Download GCP credentials file
-- Clone the Azure Arc Jumpstart repository
-- Create the .tfvars file with your variables values
-- Export the *TF_VAR_CL_OID* variable
-- *terraform init*
-- *terraform apply*
-- User remotes into client Windows VM, which automatically kicks off the [DataServicesLogonScript](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/gke/terraform/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the GKE cluster.
-- Open Azure Data Studio and connect to SQL MI instance and sample database
-- *kubectl delete namespace arc*
-- *terraform destroy*
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- Google Cloud account with billing enabled - [Create a free trial account](https://cloud.google.com/free). To create Windows Server virtual machines, you must upgrade your account to enable billing. Click Billing from the menu and then select Upgrade in the lower right.
-
- ![Screenshot showing how to enable billing on GCP account](./01.png)
-
- ![Screenshot showing how to enable billing on GCP account](./02.png)
-
- ![Screenshot showing how to enable billing on GCP account](./03.png)
-
- ***Disclaimer*** - **To prevent unexpected charges, please follow the "Delete the deployment" section at the end of this README**
-
-- [Install Terraform 1.0 or higher](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal with Role-based access control (RBAC) is required:
-
-  - "Owner" - Required for provisioning Azure resources, interacting with Azure Arc-enabled data services billing, monitoring metrics, and logs management, and creating the role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- SP_OID=$(az ad sp show --id $SP_CLIENT_ID --query id -o tsv)
-
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind as possible while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Create a new GCP Project, IAM Role & Service Account. In order to deploy resources in GCP, we will create a new GCP Project as well as a service account to allow Terraform to authenticate against GCP APIs and run the plan to deploy resources.
-
-  - Browse to the Google Cloud console and log in with your Google Cloud account. Once logged in, click on Select a project.
-
- ![GCP new project](./04.png)
-
- - [Create a new project](https://cloud.google.com/resource-manager/docs/creating-managing-projects) named "Azure Arc Demo".
-
- ![GCP new project](./05.png)
-
- ![GCP new project](./06.png)
-
-  - After creating it, be sure to copy down the project id, as it is usually different from the project name.
-
- ![GCP new project](./07.png)
-
- - Search Compute Engine API for the project
-
- ![Enable Compute Engine API](./08.png)
-
- - Enable Compute Engine API for the project
-
- ![Enable Compute Engine API](./09.png)
-
- - Create credentials for your project
-
- ![Add credentials](./10.png)
-
-  - Create project Owner service account credentials, download the private key JSON file, and copy the file to the directory where the Terraform files are located. Rename the JSON file (for example, *account.json*). The Terraform plan will be using the credentials stored in this file to authenticate against your GCP project.
-
- ![Add credentials](./11.png)
-
- ![Add credentials](./12.png)
-
- ![Add credentials](./13.png)
-
- ![Add credentials](./14.png)
-
- ![Create private key](./15.png)
-
- ![Create private key](./16.png)
-
- ![Create private key](./17.png)
-
- ![Create private key](./18.png)
-
- ![account.json](./19.png)
-
- - Search Kubernetes Engine API for the project
-
- ![Enable the Kubernetes Engine API](./20.png)
-
- - Enable Kubernetes Engine API for the project
-
- ![Enable the Kubernetes Engine API](./21.png)
-
-## Automation Flow
-
-Read the below explanation to get familiar with the automation and deployment flow.
-
-- User creates the terraform variables file (_terraform.tfvars_) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) OID variable. The variable values are used throughout the deployment.
-
-- User deploys the Terraform plan which will deploy a GKE cluster and compute instance VM as well as an Azure resource group. The Azure resource group is required to host the Azure Arc services such as the Azure Arc-enabled Kubernetes cluster, the custom location, the Azure Arc data controller, and the SQL MI database service.
-
- > **NOTE: Depending on the GCP region, make sure you do not have any [SSD quota limit in the region](https://cloud.google.com/compute/quotas), otherwise, the Azure Arc Data Controller kubernetes resources will fail to deploy.**
-
-- As part of the Windows Server 2022 VM deployment, there are 4 script executions:
-
- 1. *azure_arc.ps1* script will be created automatically as part of the Terraform plan runtime and is responsible for injecting the terraform variable values as environment variables on the Windows instance which will then be used in both the *ClientTools* and the *LogonScript* scripts.
-
- 2. *password_reset.ps1* script will be created automatically as part of the Terraform plan runtime and is responsible for creating the Windows username and password.
-
- 3. *Bootstrap.ps1* script will run during Terraform plan runtime and will:
- - Create the *Bootstrap.log* file
-      - Install the required tools – az cli, PowerShell module, kubernetes-cli, Visual C++ Redistributable (Chocolatey packages)
- - Download Azure Data Studio & Azure Data CLI
- - Disable Windows Server Manager, remove Internet Explorer, disable Windows Firewall
- - Download the DataServicesLogonScript.ps1 PowerShell script
- - Create the Windows schedule task to run the DataServicesLogonScript at first login
-
- 4. *DataServicesLogonScript.ps1* script will run on first login to Windows and will:
- - Create the *DataServicesLogonScript.log* file
- - Install the Azure Data Studio Azure Data CLI, Azure Arc and PostgreSQL extensions
- - Create the Azure Data Studio desktop shortcut
- - Use Azure CLI to connect the GKE cluster to Azure as an Azure Arc-enabled Kubernetes cluster
- - Create a custom location for use with the Azure Arc-enabled Kubernetes cluster
-      - Open another PowerShell session which will execute a command to watch the deployed Azure Arc Data Controller Kubernetes pods (see the sketch after this list)
- - Deploy an ARM template that will deploy the Azure Arc data controller on the GKE cluster
- - Execute a secondary *DeploySQLMI.ps1* script which will configure the SQL MI instance, download and install the sample Adventureworks database, and configure Azure Data Studio to connect to the SQL MI database instance
-      - Unregister the logon script Windows scheduled task so it will not run after first login
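-
-  As a reference, the pod watch the logon script opens can be reproduced manually from any PowerShell session on the client VM (assuming the data controller is deployed to the _arc_ namespace, as in this scenario):
-
-  ```shell
-  # Continuously watch the data controller pods as they are created
-  kubectl get pods -n arc -w
-  ```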
-
-## Terraform variables
-
-- Before running the Terraform plan, create the _terraform.tfvars_ file in the root of the terraform folder and supply some values for your environment.
-
- ```HCL
- gcp_project_id = "azure-arc-demo-277620"
- gcp_credentials_filename = "account.json"
- gcp_region = "us-west1"
- gcp_zone = "us-west1-a"
- gke_cluster_name = "arc-data-gke"
- admin_username = "arcdemo"
- admin_password = "ArcDemo1234567!!"
- windows_username = "arcdemo"
- windows_password = "Passw0rd123!!"
- SPN_CLIENT_ID = "33333333-XXXX-YYYY-XXXX-YTYTYTYT"
- SPN_CLIENT_SECRET = "33333333-XXXX-YTYT-9c21-7777777777"
- SPN_TENANT_ID = "33333333-XXXX-41af-1111-7777777777"
- SPN_AUTHORITY = "https://login.microsoftonline.com"
- AZDATA_USERNAME = "arcdemo"
- AZDATA_PASSWORD = "Arcdemo123!!"
- ARC_DC_NAME = "arcdatactrl"
- ARC_DC_SUBSCRIPTION = "32323232-XXXXX-YYYYY-9e8f-88888888888"
- ARC_DC_RG = "Arc-Data-GKE-Demo"
- ARC_DC_REGION = "eastus"
- deploy_SQLMI = true
- SQLMIHA = true
- deploy_PostgreSQL = false
- templateBaseUrl = "https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/gke/terraform/"
- MY_IP = "192.168.10.10"
- ```
-
-- Variable reference:
-
- - **_`gcp_project_id`_** - Your GCP Project ID (Created in the prerequisites section)
- - **_`gcp_credentials_filename`_** - Your GCP Credentials JSON filename (Created in the prerequisites section)
- - **_`gcp_region`_** - GCP region where resource will be created
- - **_`gcp_zone`_** - GCP zone where resource will be created
- - **_`gke_cluster_name`_** - GKE cluster name
- - **_`admin_username`_** - GKE cluster administrator username
- - **_`admin_password`_** - GKE cluster administrator password
- - **_`windows_username`_** - Windows Server Client compute instance VM administrator username
-  - **_`windows_password`_** - Windows Server Client compute instance VM administrator password (The password must be at least 8 characters long, contain characters from three of the following four sets: uppercase letters, lowercase letters, numbers, and symbols, and must **not contain** the user's account name or parts of the user's full name that exceed two consecutive characters)
- - **_`SPN_CLIENT_ID`_** - Your Azure service principal name
- - **_`SPN_CLIENT_SECRET`_** - Your Azure service principal password
- - **_`SPN_TENANT_ID`_** - Your Azure tenant ID
- - **_`SPN_AUTHORITY`_** - _https://login.microsoftonline.com_ **Do not change**
- - **_`AZDATA_USERNAME`_** - Azure Arc Data Controller admin username
- - **_`AZDATA_PASSWORD`_** - Azure Arc Data Controller admin password (The password must be at least 8 characters long and contain characters from the following four sets: uppercase letters, lowercase letters, numbers, and symbols)
-  - **_`ARC_DC_NAME`_** - Azure Arc Data Controller name (The name must consist of lowercase alphanumeric characters or '-', and must start and end with an alphanumeric character. This name will be used for the k8s namespace as well)
- - **_`ARC_DC_SUBSCRIPTION`_** - Azure Arc Data Controller Azure subscription ID
- - **_`ARC_DC_RG`_** - Azure resource group where all future Azure Arc resources will be deployed
-  - **_`ARC_DC_REGION`_** - Azure location where the Azure Arc Data Controller resource will be created in Azure (Currently, the supported regions are eastus, eastus2, centralus, westus2, westeurope, southeastasia)
- - **_`deploy_SQLMI`_** - Boolean that sets whether or not to deploy SQL Managed Instance, for this scenario we leave it set to true
- - **_`SQLMIHA`_** - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, set this to either true or false
-  - **_`deploy_PostgreSQL`_** - Boolean that sets whether or not to deploy PostgreSQL, for this SQL Managed Instance scenario we leave it set to false
- - **_`templateBaseUrl`_** - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well - e.g. `https://raw.githubusercontent.com/your--github--account/azure_arc/your--branch/azure_arc_data_jumpstart/gke/terraform/`
- - **_`MY_IP`_** - Your Client IP
-
-### Azure Custom Location Resource Provider (RP) and the Object ID (OID) environment variable
-
-- You also need to get the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) OID and export it as an environment variable:
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- export TF_VAR_CL_OID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $Env:TF_VAR_CL_OID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-## Deployment
-
-> **NOTE: The GKE cluster will use 3 nodes of SKU "n1-standard-8".**
-
-As mentioned, the Terraform plan and automation scripts will deploy a GKE cluster, the Azure Arc Data Controller on that cluster, a SQL Managed Instance with sample database, and a Windows Server 2022 Client GCP compute instance.
-
-- Navigate to the folder containing the Terraform plan files.
-
- ```shell
- cd azure_arc_data_jumpstart/gke/terraform
- ```
-
-- Run the ```terraform init``` command which is used to initialize a working directory containing Terraform configuration files and load the required Terraform providers.
-
- ![terraform init](./22.png)
-
-- Run the ```terraform plan -out=infra.out``` command to make sure everything is configured properly.
-
- ![terraform plan](./23.png)
-
-- Run the ```terraform apply "infra.out"``` command and wait for the plan to finish. **Runtime for deploying all the GCP resources for this plan is ~20-30min.**
-
- ![terraform apply completed](./24.png)
-
-- Once completed, you can review the GKE cluster and the worker nodes resources as well as the GCP compute instance VM created.
-
- ![GKE cluster](./25.png)
-
- ![GKE cluster](./26.png)
-
- ![GCP VM instances](./27.png)
-
- ![GCP VM instances](./28.png)
-
-- In the Azure Portal, a new empty Azure resource group was created which will be used for Azure Arc Data Controller and the other data services you will be deploying in the future.
-
- ![New empty Azure resource group](./29.png)
-
-## Windows Login & Post Deployment
-
-Now that we have both the GKE cluster and the Windows Server Client instance created, it is time to login to the Client VM.
-
-- Select the Windows instance, click on the RDP dropdown and download the RDP file. Using your *windows_username* and *windows_password* credentials, log in to the VM.
-
- ![GCP Client VM RDP](./30.png)
-
- ![GCP Client VM RDP](./31.png)
-
-- At first login, as mentioned in the "Automation Flow" section, a logon script will get executed. This script was created as part of the automated deployment process.
-
-  Let the script run its course and **do not close** the PowerShell session; this will be done for you once completed. You will notice that the Azure Arc Data Controller gets deployed on the GKE cluster. **The logon script run time is approximately one hour**.
-
- Once the script finishes its run, the logon script PowerShell session will be closed and the Azure Arc Data Controller will be deployed on the GKE cluster and be ready to use.
-
- ![PowerShell login script run](./32.png)
-
- ![PowerShell login script run](./33.png)
-
- ![PowerShell login script run](./34.png)
-
- ![PowerShell login script run](./35.png)
-
- ![PowerShell login script run](./36.png)
-
- ![PowerShell login script run](./37.png)
-
- ![PowerShell login script run](./38.png)
-
- ![PowerShell login script run](./39.png)
-
- ![PowerShell login script run](./40.png)
-
- ![PowerShell login script run](./41.png)
-
- ![PowerShell login script run](./42.png)
-
- ![PowerShell login script run](./43.png)
-
- ![PowerShell login script run](./44.png)
-
- ![PowerShell login script run](./45.png)
-
- ![PowerShell login script run](./46.png)
-
-- When the scripts are complete, all PowerShell windows will close.
-
- ![PowerShell login script run](./47.png)
-
-- From Azure Portal, navigate to the resource group and confirm that the Azure Arc-enabled Kubernetes cluster, the Azure Arc data controller resource and the Custom Location resource are present.
-
- ![Azure Portal showing data controller resource](./48.png)
-
-- Another tool automatically deployed is Azure Data Studio along with the *Azure Data CLI*, the *Azure Arc* and the *PostgreSQL* extensions. Using the Desktop shortcut created for you, open Azure Data Studio and expand the SQL MI connection to see the Adventureworks sample database.
-
- ![Azure Data Studio shortcut](./49.png)
-
- ![Azure Data Studio extension](./50.png)
-
- ![Azure Data studio sample database](./51.png)
-
- ![Azure Data studio sample database](./52.png)
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance Stress Simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ will allow you to generate load on the Azure Arc-enabled SQL Managed Instance that can be used to showcase how the SQL database and services are performing, as well as to highlight operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you alongside the username and password you used to deploy the environment.
-
- ![Open SqlQueryStress](./53.png)
-
- ![SQLMI Endpoints text file](./54.png)
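-
-- Alternatively, the endpoints can be read straight from the Kubernetes cluster. A minimal sketch, assuming the data controller namespace is _arc_ as in this scenario (the _sqlmi_ short name for the SQL Managed Instance custom resource may vary by release):
-
-  ```shell
-  # Show the SQL Managed Instance custom resource, which includes its endpoints
-  kubectl get sqlmi -n arc
-  ```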
-
-> **NOTE: Secondary SQL Managed Instance endpoint will be available only when using the HA deployment model ("Business Critical").**
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![SqlQueryStress connected](./55.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the below procedure and change the number of iterations you want it to run, as well as the number of threads, to generate even more load on the database. In addition, change the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads per iteration, and a 1ms delay between queries. These configurations should allow you to have the stress test running for a while.
-
- ![SqlQueryStress settings](./56.png)
-
- ![SqlQueryStress running](./57.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure as well as SQL Managed Instance monitoring (PostgreSQL dashboards are included as well but we will not be covering these in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is shown in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Grafana desktop shortcut](./58.png)
-
-- [Optional] The IP address for this instance represents the Kubernetes _LoadBalancer_ external IP that was provisioned as part of Azure Arc-enabled data services. Use the _```kubectl get svc -n arc```_ command to view the _metricsui_ external service IP address, as shown below.
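-
-  ```shell
-  # List services in the data controller namespace; the metricsui
-  # LoadBalancer entry carries the Grafana external IP
-  kubectl get svc -n arc
-  ```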
-
- ![metricsui Kubernetes service](./59.png)
-
-- To log in, use the same username and password that is in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Grafana username and password](./60.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Grafana dashboards](./61.png)
-
- ![Grafana "SQL Managed Instance Metrics" dashboard](./62.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _SqlQueryStress_ (in case it was already finished).
-
- ![Last 5 minutes time range](./63.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Increased load activity](./64.png)
-
- ![Increased load activity](./65.png)
-
-## Delete the deployment
-
-To completely delete the environment, follow the below steps.
-
-- Delete the data services resources by using kubectl. Run the below command from a PowerShell window on the client VM.
-
- ```shell
- kubectl delete namespace arc
- ```
-
- ![Delete database resources](./66.png)
-
-- Use terraform to delete all of the GCP resources as well as the Azure resource group. **The *terraform destroy* run time is approximately 5-6 minutes**.
-
- ```shell
- terraform destroy --auto-approve
- ```
-
- ![terraform destroy](./67.png)
-
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_postgres_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_postgres_terraform/_index.md
deleted file mode 100644
index d0d7b37bbe..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/gke/gke_postgres_terraform/_index.md
+++ /dev/null
@@ -1,393 +0,0 @@
----
-type: docs
-title: "PostgreSQL Terraform Plan"
-linkTitle: "PostgreSQL Terraform Plan"
-weight: 3
-description: >
----
-
-## Deploy an Azure Arc-enabled PostgreSQL server on GKE using a Terraform plan
-
-The following scenario will guide you on how to deploy a "Ready to Go" environment so you can deploy Azure Arc-enabled data services on a [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) cluster using [Terraform](https://www.terraform.io/).
-
-By the end of this scenario, you will have a GKE cluster deployed with an Azure Arc Data Controller ([in "Directly Connected" mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity)), Azure Arc-enabled PostgreSQL server with a sample database and a Microsoft Windows Server 2022 (Datacenter) GKE compute instance VM installed & pre-configured with all the required tools needed to work with Azure Arc data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Deployment Process Overview
-
-- Create a Google Cloud Platform (GCP) project, IAM Role & Service Account
-- Download GCP credentials file
-- Clone the Azure Arc Jumpstart repository
-- Create the .tfvars file with your variables values
-- Export the *TF_VAR_CL_OID* variable
-- *terraform init*
-- *terraform apply*
-- User remotes into client Windows VM, which automatically kicks off the [DataServicesLogonScript](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/gke/terraform/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the GKE cluster.
-- Open Azure Data Studio and connect to Postgres instance and sample database
-- Run cleanup PowerShell script
-- *terraform destroy*
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- Google Cloud account with billing enabled - [Create a free trial account](https://cloud.google.com/free). To create Windows Server virtual machines, you must upgrade your account to enable billing. Click Billing from the menu and then select Upgrade in the lower right.
-
- ![Screenshot showing how to enable billing on GCP account](./01.png)
-
- ![Screenshot showing how to enable billing on GCP account](./02.png)
-
- ![Screenshot showing how to enable billing on GCP account](./03.png)
-
- ***Disclaimer*** - **To prevent unexpected charges, please follow the "Delete the deployment" section at the end of this README**
-
-- [Install Terraform 1.0 or higher](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal with Role-based access control (RBAC) is required:
-
-  - "Owner" - Required for provisioning Azure resources, interacting with Azure Arc-enabled data services billing, monitoring metrics, and logs management, and creating the role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- SP_OID=$(az ad sp show --id $SP_CLIENT_ID --query id -o tsv)
-
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind as possible while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-- Register your subscription with the Microsoft.AzureArcData resource provider for Azure Arc-enabled data services. Registration is an asynchronous process and may take approximately 10 minutes.
-
- ```shell
- az provider register --namespace Microsoft.AzureArcData
- ```
-
- You can monitor the registration process with the following commands:
-
- ```shell
- az provider show -n Microsoft.AzureArcData -o table
- ```
-
-- Create a new GCP Project, IAM Role & Service Account. In order to deploy resources in GCP, we will create a new GCP Project as well as a service account to allow Terraform to authenticate against GCP APIs and run the plan to deploy resources.
-
-  - Browse to the Google Cloud console and log in with your Google Cloud account. Once logged in, click on Select a project.
-
- ![GCP new project](./04.png)
-
- - [Create a new project](https://cloud.google.com/resource-manager/docs/creating-managing-projects) named "Azure Arc Demo".
-
- ![GCP new project](./05.png)
-
- ![GCP new project](./06.png)
-
-  - After creating it, be sure to copy down the project id, as it is usually different from the project name.
-
- ![GCP new project](./07.png)
-
- - Search Compute Engine API for the project
-
- ![Enable Compute Engine API](./08.png)
-
- - Enable Compute Engine API for the project
-
- ![Enable Compute Engine API](./09.png)
-
- - Create credentials for your project
-
- ![Add credentials](./10.png)
-
-  - Create project Owner service account credentials, download the private key JSON file, and copy the file to the directory where the Terraform files are located. Rename the JSON file (for example, *account.json*). The Terraform plan will be using the credentials stored in this file to authenticate against your GCP project.
-
- ![Add credentials](./11.png)
-
- ![Add credentials](./12.png)
-
- ![Add credentials](./13.png)
-
- ![Add credentials](./14.png)
-
- ![Create private key](./15.png)
-
- ![Create private key](./16.png)
-
- ![Create private key](./17.png)
-
- ![Create private key](./18.png)
-
- ![account.json](./19.png)
-
- - Search Kubernetes Engine API for the project
-
- ![Enable the Kubernetes Engine API](./20.png)
-
- - Enable Kubernetes Engine API for the project
-
- ![Enable the Kubernetes Engine API](./21.png)
-
-## Automation Flow
-
-Read the below explanation to get familiar with the automation and deployment flow.
-
-- User creates the terraform variables file (_terraform.tfvars_) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) OID variable. The variable values are used throughout the deployment.
-
-- User deploys the Terraform plan which will deploy a GKE cluster and compute instance VM as well as an Azure resource group. The Azure resource group is required to host the Azure Arc services such as the Azure Arc-enabled Kubernetes cluster, the custom location, the Azure Arc data controller, and the PostgreSQL database service.
-
- > **NOTE: Depending on the GCP region, make sure you do not have any [SSD quota limit in the region](https://cloud.google.com/compute/quotas), otherwise, the Azure Arc Data Controller kubernetes resources will fail to deploy.**
-
-- As part of the Windows Server 2022 VM deployment, there are 4 script executions:
-
- 1. *azure_arc.ps1* script will be created automatically as part of the Terraform plan runtime and is responsible for injecting the terraform variable values as environment variables on the Windows instance which will then be used in both the *ClientTools* and the *LogonScript* scripts.
-
- 2. *password_reset.ps1* script will be created automatically as part of the Terraform plan runtime and is responsible for creating the Windows username and password.
-
- 3. *Bootstrap.ps1* script will run during Terraform plan runtime and will:
- - Create the *Bootstrap.log* file
-      - Install the required tools – az cli, PowerShell module, kubernetes-cli, Visual C++ Redistributable (Chocolatey packages)
- - Download Azure Data Studio & Azure Data CLI
- - Disable Windows Server Manager, remove Internet Explorer, disable Windows Firewall
- - Download the DataServicesLogonScript.ps1 PowerShell script
- - Create the Windows schedule task to run the DataServicesLogonScript at first login
-
- 4. *DataServicesLogonScript.ps1* script will run on first login to Windows and will:
- - Create the *DataServicesLogonScript.log* file
- - Install the Azure Data Studio Azure Data CLI, Azure Arc and PostgreSQL extensions
- - Create the Azure Data Studio desktop shortcut
-      - Use Azure CLI to connect the GKE cluster to Azure as an Azure Arc-enabled Kubernetes cluster (see the sketch after this list)
-      - Create a custom location for use with the Azure Arc-enabled Kubernetes cluster
-      - Open another PowerShell session which will execute a command to watch the deployed Azure Arc Data Controller Kubernetes pods
-      - Deploy an ARM template that will deploy the Azure Arc data controller on the GKE cluster
-      - Execute a secondary *DeployPostgreSQL.ps1* script which will configure the PostgreSQL instance, download and install the sample Adventureworks database, and configure Azure Data Studio to connect to the PostgreSQL database instance
-      - Unregister the logon script Windows scheduled task so it will not run after first login
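-
-  As a reference, a minimal sketch of the cluster-connect step the logon script performs (the cluster and resource group names are placeholders for the values from your _terraform.tfvars_):
-
-  ```shell
-  # Onboard the GKE cluster to Azure Arc as a connected cluster
-  az connectedk8s connect --name <gke_cluster_name> --resource-group <ARC_DC_RG>
-  ```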
-
-## Terraform variables
-
-- Before running the Terraform plan, create the _terraform.tfvars_ file in the root of the terraform folder and supply some values for your environment.
-
- ```HCL
- gcp_project_id = "azure-arc-demo-277620"
- gcp_credentials_filename = "account.json"
- gcp_region = "us-west1"
- gcp_zone = "us-west1-a"
- gke_cluster_name = "arc-data-gke"
- admin_username = "arcdemo"
- admin_password = "ArcDemo1234567!!"
- windows_username = "arcdemo"
- windows_password = "Passw0rd123!!"
- SPN_CLIENT_ID = "33333333-XXXX-YYYY-XXXX-YTYTYTYT"
- SPN_CLIENT_SECRET = "33333333-XXXX-YTYT-9c21-7777777777"
- SPN_TENANT_ID = "33333333-XXXX-41af-1111-7777777777"
- SPN_AUTHORITY = "https://login.microsoftonline.com"
- AZDATA_USERNAME = "arcdemo"
- AZDATA_PASSWORD = "Arcdemo123!!"
- ARC_DC_NAME = "arcdatactrl"
- ARC_DC_SUBSCRIPTION = "32323232-XXXXX-YYYYY-9e8f-88888888888"
- ARC_DC_RG = "Arc-Data-GKE-Demo"
- ARC_DC_REGION = "eastus"
- deploy_SQLMI = false
- SQLMIHA = false
- deploy_PostgreSQL = true
- templateBaseUrl = "https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/gke/terraform/"
- MY_IP = "192.168.10.10"
- ```
-
-- Variable reference:
-
- - **_`gcp_project_id`_** - Your GCP Project ID (Created in the prerequisites section)
- - **_`gcp_credentials_filename`_** - Your GCP Credentials JSON filename (Created in the prerequisites section)
-  - **_`gcp_region`_** - GCP region where resources will be created
-  - **_`gcp_zone`_** - GCP zone where resources will be created
- - **_`gke_cluster_name`_** - GKE cluster name
- - **_`admin_username`_** - GKE cluster administrator username
- - **_`admin_password`_** - GKE cluster administrator password
- - **_`windows_username`_** - Windows Server Client compute instance VM administrator username
-  - **_`windows_password`_** - Windows Server Client compute instance VM administrator password (The password must be at least 8 characters long and contain characters from three of the following four sets: uppercase letters, lowercase letters, numbers, and symbols. It must also **not contain** the user's account name or parts of the user's full name longer than two consecutive characters)
- - **_`SPN_CLIENT_ID`_** - Your Azure service principal name
- - **_`SPN_CLIENT_SECRET`_** - Your Azure service principal password
- - **_`SPN_TENANT_ID`_** - Your Azure tenant ID
- - **_`SPN_AUTHORITY`_** - _https://login.microsoftonline.com_ **Do not change**
- - **_`AZDATA_USERNAME`_** - Azure Arc Data Controller admin username
- - **_`AZDATA_PASSWORD`_** - Azure Arc Data Controller admin password (The password must be at least 8 characters long and contain characters from the following four sets: uppercase letters, lowercase letters, numbers, and symbols)
-  - **_`ARC_DC_NAME`_** - Azure Arc Data Controller name (The name must consist of lowercase alphanumeric characters or '-', and must start and end with an alphanumeric character. This name will be used for the Kubernetes namespace as well)
- - **_`ARC_DC_SUBSCRIPTION`_** - Azure Arc Data Controller Azure subscription ID
- - **_`ARC_DC_RG`_** - Azure resource group where all future Azure Arc resources will be deployed
-  - **_`ARC_DC_REGION`_** - Azure location where the Azure Arc Data Controller resource will be created in Azure (Currently, the supported regions are eastus, eastus2, centralus, westus2, westeurope, southeastasia)
-  - **_`deploy_SQLMI`_** - Boolean that sets whether or not to deploy SQL Managed Instance; for this data controller-only scenario we leave it set to false
-  - **_`SQLMIHA`_** - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations; for this data controller vanilla scenario we leave it set to false
-  - **_`deploy_PostgreSQL`_** - Boolean that sets whether or not to deploy PostgreSQL; for this scenario we leave it set to true
- - **_`templateBaseUrl`_** - GitHub URL to the deployment template - filled in by default to point to [Microsoft/Azure Arc](https://github.com/microsoft/azure_arc) repository, but you can point this to your forked repo as well - e.g. `https://raw.githubusercontent.com/your--github--account/azure_arc/your--branch/azure_arc_data_jumpstart/gke/terraform/`
- - **_`MY_IP`_** - Your Client IP
-
-### Azure Custom Location Resource Provider (RP) and the Object ID (OID) environment variable
-
-- You will also need to get the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) OID and export it as an environment variable:
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- export TF_VAR_CL_OID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $Env:TF_VAR_CL_OID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
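-
-  Optionally, confirm the variable was populated before running Terraform; an empty value usually means your account lacks permission to list service principals:
-
-  ```shell
-  # Sanity check: this should print a single GUID.
-  echo "$TF_VAR_CL_OID"
-  ```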
-
-## Deployment
-
-> **NOTE: The GKE cluster will use 3 nodes of SKU "n1-standard-8".**
-
-As mentioned, the Terraform plan will deploy a GKE cluster, the Azure Arc Data Controller on that cluster, a PostgreSQL instance with a sample database, and a Windows Server 2022 Client GCP compute instance.
-
-- Navigate to the folder that contains the Terraform plan files.
-
- ```shell
- cd azure_arc_data_jumpstart/gke/terraform
- ```
-
-- Run the ```terraform init``` command, which initializes the working directory containing the Terraform configuration files and downloads the required Terraform providers.
-
- ![terraform init](./22.png)
-
-- Run the ```terraform plan -out=infra.out``` command to make sure everything is configured properly.
-
- ![terraform plan](./23.png)
-
-- Run the ```terraform apply "infra.out"``` command and wait for the plan to finish. **Runtime for deploying all the GCP resources for this plan is ~20-30min.**
-
- ![terraform apply completed](./24.png)
-
-- Once completed, you can review the GKE cluster and the worker nodes resources as well as the GCP compute instance VM created.
-
- ![GKE cluster](./25.png)
-
- ![GKE cluster](./26.png)
-
- ![GCP VM instances](./27.png)
-
- ![GCP VM instances](./28.png)
-
-- In the Azure Portal, a new, empty Azure resource group was created, which will be used for the Azure Arc Data Controller and the other data services you will deploy in the future.
-
- ![New empty Azure resource group](./29.png)
-
-## Windows Login & Post Deployment
-
-Now that we have both the GKE cluster and the Windows Server Client instance created, it is time to log in to the Client VM.
-
-- Select the Windows instance, click on the RDP dropdown and download the RDP file. Using your *windows_username* and *windows_password* credentials, log in to the VM.
-
- ![GCP Client VM RDP](./30.png)
-
- ![GCP Client VM RDP](./31.png)
-
-- At first login, as mentioned in the "Automation Flow" section, a logon script will get executed. This script was created as part of the automated deployment process.
-
-  Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. You will notice that the Azure Arc Data Controller gets deployed on the GKE cluster. **The logon script run time is approximately one hour.**
-
- Once the script finishes its run, the logon script PowerShell session will be closed and the Azure Arc Data Controller will be deployed on the GKE cluster and be ready to use.
-
- ![PowerShell login script run](./32.png)
-
- ![PowerShell login script run](./33.png)
-
- ![PowerShell login script run](./34.png)
-
- ![PowerShell login script run](./35.png)
-
- ![PowerShell login script run](./36.png)
-
- ![PowerShell login script run](./37.png)
-
- ![PowerShell login script run](./38.png)
-
- ![PowerShell login script run](./39.png)
-
- ![PowerShell login script run](./40.png)
-
- ![PowerShell login script run](./41.png)
-
- ![PowerShell login script run](./42.png)
-
- ![PowerShell login script run](./43.png)
-
- ![PowerShell login script run](./44.png)
-
- ![PowerShell login script run](./45.png)
-
- ![PowerShell login script run](./46.png)
-
- - When the scripts are complete, all PowerShell windows will close.
-
- ![PowerShell login script run](./47.png)
-
-- From Azure Portal, navigate to the resource group and confirm that the Azure Arc-enabled Kubernetes cluster, the Azure Arc data controller resource and the Custom Location resource are present.
-
- ![Azure Portal showing data controller resource](./48.png)
-
-- Another tool automatically deployed is Azure Data Studio, along with the *Azure Data CLI*, *Azure Arc*, and *PostgreSQL* extensions. Azure Data Studio will be opened automatically after the logon script is finished. In Azure Data Studio, you can connect to the PostgreSQL instance and see the AdventureWorks sample database.
-
- ![Azure Data Studio shortcut](./49.png)
-
- ![Azure Data Studio extension](./50.png)
-
- ![Azure Data studio sample database](./51.png)
-
- ![Azure Data studio sample database](./52.png)
-
-## Delete the deployment
-
-To completely delete the environment, follow the below steps.
-
-- Delete the data services resources by using kubectl. Run the below command from a PowerShell window on the client VM.
-
- ```shell
- kubectl delete namespace arc
- ```
-
- ![Delete database resources](./53.png)
-
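-  Once the command returns, a quick hedged way to confirm the data services resources are gone is to query the namespace again; after the deletion completes, kubectl should report it as not found:
-
-  ```shell
-  # Expected once deletion completes: Error from server (NotFound)
-  kubectl get namespace arc
-  ```
-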
-- Use Terraform to delete all of the GCP resources as well as the Azure resource group. **The *terraform destroy* run time is approximately 5-6 minutes.**
-
- ```shell
- terraform destroy --auto-approve
- ```
-
- ![terraform destroy](./54.png)
-
-
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/_index.md
deleted file mode 100644
index c7f852914b..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Kubernetes Kubeadm"
-linkTitle: "Kubernetes Kubeadm"
-weight: 7
-description: >-
-  If you do not have a Kubernetes cluster, the scenarios in this section will guide you on how to create a Kubernetes cluster using kubeadm in Azure VMs, with Azure Arc-enabled data services integration, in an automated fashion using an ARM template.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_dc_vanilla_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_dc_vanilla_arm_template/_index.md
deleted file mode 100644
index b740289d5f..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_dc_vanilla_arm_template/_index.md
+++ /dev/null
@@ -1,288 +0,0 @@
----
-type: docs
-title: "Data Controller ARM Template"
-linkTitle: "Data Controller ARM Template"
-weight: 1
-description: >
----
-
-## Deploy a vanilla Azure Arc Data Controller in directly connected mode on Kubeadm Kubernetes cluster with Azure provider using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) deployed on [Kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/) Kubernetes cluster.
-
-By the end of this scenario, you will have a Kubeadm Kubernetes cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the tools required to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with the following Role-based access control (RBAC) role is required:
-
-  - "Owner" - Required for provisioning Azure resources, interacting with Azure Arc-enabled data services billing, monitoring metrics and logs management, and creating a role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Owner" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind and adhere to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering the use of a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
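-
-  As a sketch of that narrower scoping (the resource group here is a placeholder assumption, created beforehand, and not part of this scenario's automation):
-
-  ```shell
-  # Assumption: pre-create a resource group and scope the SP to it.
-  az group create --name Arc-Data-Demo --location eastus
-  az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" \
-    --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-Data-Demo
-  ```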
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- The user edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use it as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/VNET.json) - Deploys a VNET and Subnet for Client and K8s VMs.
-  - [_ubuntuKubeadm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/ubuntuKubeadm.json) - Deploys two Ubuntu Linux VMs which will be transformed into a Kubeadm management cluster (a single control-plane node and a single worker node) using the [_installKubeadm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/installKubeadm.sh) and the [_installKubeadmWorker_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/installKubeadmWorker.sh) shell scripts. This Kubeadm cluster will be used by the rest of the Azure Arc-enabled data services automation deployment.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that creates a new Azure Arc-enabled Kubernetes cluster and configures Azure Arc-enabled data services on the Kubeadm cluster, including the Data Controller. Azure Arc-enabled data services deployed in directly connected mode use this resource type in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment uses the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this data controller vanilla scenario we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, for this data controller vanilla scenario we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this data controller vanilla scenario we leave it set to _**false**_.
-  - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion for connecting to the client VM.
-
-- You will also need to get the Azure Custom Location Resource Provider (RP) Object ID (OID) and export it as an environment variable. This is required to enable [Custom Location](https://learn.microsoft.com/azure/azure-arc/platform/conceptual-custom-locations) on your cluster.
-
-  > **NOTE: You need permissions to list all the service principals.**
-
-  #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
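-- Optionally, before deploying, you can dry-run the template against your edited parameters file; a sketch with placeholder values (run after exporting _customLocationRPOID_ above):
-
-  ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group validate \
-  --resource-group <resource-group-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
-  --parameters azuredeploy.parameters.json \
-  --parameters customLocationRPOID="$customLocationRPOID"
-  ```
-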
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/kubeadm/azure/ARM) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
-  --parameters <parameters file location> \
-  --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
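-  If you do hit that capacity error, one way to probe size availability per region is a sketch like the below (size and region are examples; a value in the "Restrictions" column means the size is restricted there):
-
-  ```shell
-  az vm list-skus --size Standard_D8s_v3 --location eastus --output table
-  ```
-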
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_dc_vanilla_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_dc_vanilla_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_dc_vanilla_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Arc Data Controller will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the post-run desktop](./24.png)
-
-- Since this scenario is onboarding your Kubernetes cluster with Arc and deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - Custom location - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - Azure Arc Data Controller - The data controller that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./25.png)
-
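-- You can also verify the Azure Arc-enabled Kubernetes resource from the Azure CLI; a sketch with placeholder names (assumes the _connectedk8s_ Azure CLI extension is installed):
-
-  ```shell
-  # Placeholders: substitute your cluster and resource group names.
-  az connectedk8s show --name <arc-cluster-name> \
-    --resource-group <resource-group-name> --output table
-  ```
-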
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./26.png)
-
- ![Screenshot showing Azure Data Studio extensions](./27.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-- In order to view these cluster extensions, open the Extensions settings of the Azure Arc-enabled Kubernetes resource.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./28.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./29.png)
-
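-- The same extension list is available from the Azure CLI; a sketch with placeholder names (assumes the _k8s-extension_ Azure CLI extension is installed):
-
-  ```shell
-  # Placeholders: substitute your cluster and resource group names.
-  az k8s-extension list --cluster-name <arc-cluster-name> \
-    --cluster-type connectedClusters \
-    --resource-group <resource-group-name> --output table
-  ```
-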
-### Exploring logs from the Client virtual machine
-
-Occasionally, you may need to review log output from scripts that run on the _Arc-Data-Client_, _Arc-Data-Kubeadm-MGMT-Master_ or _Arc-Data-Kubeadm-MGMT-Worker_ virtual machines in case of deployment failures. To make troubleshooting easier, the scenario deployment scripts collect all relevant logs in the _C:\Temp_ folder on _Arc-Data-Client_. A short description of the logs and their purpose can be seen in the list below:
-
-| Logfile | Description |
-| ------- | ----------- |
-| _C:\Temp\Bootstrap.log_ | Output from the initial bootstrapping script that runs on _Arc-Data-Client_. |
-| _C:\Temp\DataServicesLogonScript.log_ | Output of _DataServicesLogonScript.ps1_ which configures Azure Arc-enabled data services baseline capability. |
-| _C:\Temp\installKubeadm.log_ | Output from the custom script extension which runs on _Arc-Data-Kubeadm-MGMT-Master_ and configures the Kubeadm cluster Master Node. If you encounter ARM deployment issues with _ubuntuKubeadm.json_ then review this log. |
-| _C:\Temp\installKubeadmWorker.log_ | Output from the custom script extension which runs on _Arc-Data-Kubeadm-MGMT-Worker_ and configures the Kubeadm cluster Worker Node. If you encounter ARM deployment issues with _ubuntuKubeadm.json_ then review this log. |
-
-![Screenshot showing the Temp folder with deployment logs](./30.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./31.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_mssql_mi_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_mssql_mi_arm_template/_index.md
deleted file mode 100644
index c153676bd2..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_mssql_mi_arm_template/_index.md
+++ /dev/null
@@ -1,372 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance ARM Template"
-linkTitle: "SQL Managed Instance ARM Template"
-weight: 2
-description: >
----
-
-## Deploy Azure Arc-enabled SQL Managed Instance in directly connected mode on Kubeadm Kubernetes cluster with Azure provider using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on [Kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/) Kubernetes cluster.
-
-By the end of this scenario, you will have a Kubeadm Kubernetes cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the tools required to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER\.ssh\id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create an Azure service principal (SP). To deploy this scenario, an Azure service principal assigned with the following Role-based access control (RBAC) role is required:
-
-  - "Owner" - Required for provisioning Azure resources, interacting with Azure Arc-enabled data services billing, monitoring metrics and logs management, and creating a role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
-  az ad sp create-for-rbac -n "<Unique SP Name>" --role "Owner" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in mind and adhere to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering the use of a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation.
-
-- The user edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use it as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/VNET.json) - Deploys a VNET and Subnet for Client and K8s VMs.
-  - [_ubuntuKubeadm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/ubuntuKubeadm.json) - Deploys two Ubuntu Linux VMs which will be transformed into a Kubeadm management cluster (a single control-plane node and a single worker node) using the [_installKubeadm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/installKubeadm.sh) and the [_installKubeadmWorker_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/installKubeadmWorker.sh) shell scripts. This Kubeadm cluster will be used by the rest of the Azure Arc-enabled data services automation deployment.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into the client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that creates a new Azure Arc-enabled Kubernetes cluster and configures Azure Arc-enabled data services on the Kubeadm workload cluster, including the Data Controller. Azure Arc-enabled data services deployed in directly connected mode use this resource type in order to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as for using Azure Arc [Custom Locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
-- In addition to deploying the data controller and SQL Managed Instance, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
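-- After the deployment completes, a quick hedged way to confirm the restore is to list the databases over the SQL Managed Instance primary endpoint; all connection values below are placeholders, surfaced later in the _SQLMI Endpoints_ desktop shortcut:
-
-  ```shell
-  # Placeholders: substitute the endpoint, port, and credentials from the
-  # "SQLMI Endpoints" text file on the Client VM desktop.
-  sqlcmd -S <sqlmi-primary-endpoint>,<port> -U <username> -P <password> \
-    -Q "SELECT name FROM sys.databases"
-  ```
-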
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment uses the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this Azure Arc-enabled SQL Managed Instance scenario we will set it to _**true**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, set this to either _**true**_ or _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this scenario we leave it set to _**false**_.
-  - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion for connecting to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
-
-- You will also need to get the Azure Custom Location Resource Provider (RP) Object ID (OID) and export it as an environment variable. This is required to enable [Custom Location](https://learn.microsoft.com/azure/azure-arc/platform/conceptual-custom-locations) on your cluster.
-
-  > **NOTE: You need permissions to list all the service principals.**
-
-  #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/kubeadm/azure/ARM) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
-  --parameters <parameters file location> \
-  --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_mssql_mi_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_mssql_mi_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_mssql_mi_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Arc Data Controller and the SQL Managed Instance will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the PowerShell logon script run](./24.png)
-
- ![Screenshot showing the PowerShell logon script run](./25.png)
-
- ![Screenshot showing the post-run desktop](./26.png)
-
-- Since this scenario is onboarding your Kubernetes cluster with Arc and deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group. The important ones to notice are:
-
- - Custom location - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure services instances.
-
- - Azure Arc Data Controller - The data controller that is now deployed on the Kubernetes cluster.
-
- - Azure Arc-enabled SQL Managed Instance - The SQL Managed Instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./27.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./28.png)
-
- ![Screenshot showing Azure Data Studio extensions](./29.png)
-
-- Additionally, the SQL Managed Instance connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio SQL MI connection](./30.png)
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-- In order to view these cluster extensions, open the Extensions settings of the Azure Arc-enabled Kubernetes resource.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./31.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./32.png)
-
-## High Availability with SQL Always-On availability groups
-
-Azure Arc-enabled SQL Managed Instance is deployed on Kubernetes as a containerized application and uses Kubernetes constructs such as stateful sets and persistent storage to provide built-in health monitoring, failure detection, and failover mechanisms to maintain service health. For increased reliability, you can also configure Azure Arc-enabled SQL Managed Instance to deploy with extra replicas in a high availability configuration.
-
-For showcasing and testing SQL Managed Instance with [Always On availability groups](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-high-availability#deploy-with-always-on-availability-groups), a dedicated [Jumpstart scenario](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/aks/aks_mssql_ha/) is available to help you simulate failures and get hands-on experience with this deployment model.
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance stress simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ will allow you to generate load on the Azure Arc-enabled SQL Managed Instance, which can be used to showcase how the SQL database and services are performing, as well as to highlight operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you alongside the username and password you used to deploy the environment.
-
- ![Screenshot showing opened SqlQueryStress](./33.png)
-
- ![Screenshot showing SQLMI Endpoints text file](./34.png)
-
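-- If you prefer the command line over the text file, the endpoints can also be read from the SQL Managed Instance custom resource itself; a sketch assuming the data services namespace is _arc_:
-
-  ```shell
-  # Assumption: namespace "arc"; the PRIMARY-ENDPOINT column shows the address.
-  kubectl get sqlmi -n arc
-  ```
-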
-> **NOTE: Secondary SQL Managed Instance endpoint will be available only when using the [HA deployment model ("Business Critical")](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/day2/kubeadm/azure/capi_mssql_ha/).**
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![Screenshot showing SqlQueryStress connected](./35.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the below procedure and change the number of iterations you want it to run, as well as the number of threads, to generate even more load on the database. In addition, change the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads per iteration, and a 1ms delay between queries. These configurations should allow you to have the stress test running for a while.
-
- ![Screenshot showing SqlQueryStress settings](./36.png)
-
- ![Screenshot showing SqlQueryStress running](./37.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure and SQL Managed Instance monitoring (PostgreSQL dashboards are included as well, but we will not be covering these in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is shown in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Screenshot showing Grafana desktop shortcut](./38.png)
-
-- [Optional] The IP address for this instance represents the Kubernetes _LoadBalancer_ external IP that was provisioned as part of Azure Arc-enabled data services. Use the _`kubectl get svc -n arc`_ command to view the _metricsui_ external service IP address.
-
- ![Screenshot showing metricsui Kubernetes service](./39.png)
-
-- To log in, use the same username and password that is in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Screenshot showing Grafana username and password](./40.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Screenshot showing Grafana dashboards](./41.png)
-
- ![Screenshot showing Grafana "SQL Managed Instance Metrics" dashboard](./42.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _SqlQueryStress_ (in case it was already finished).
-
- ![Screenshot showing "Last 5 minutes" time range](./43.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Screenshot showing increased load activity](./44.png)
-
- ![Screenshot showing increased load activity](./45.png)
-
-### Exploring logs from the Client virtual machine
-
-Occasionally, you may need to review log output from scripts that run on the _Arc-Data-Client_, _Arc-Data-Kubeadm-MGMT-Master_ or _Arc-Data-Kubeadm-MGMT-Worker_ virtual machines in case of deployment failures. To make troubleshooting easier, the scenario deployment scripts collect all relevant logs in the _C:\Temp_ folder on _Arc-Data-Client_. A short description of the logs and their purpose can be seen in the list below:
-
-| Logfile | Description |
-| ------- | ----------- |
-| _C:\Temp\Bootstrap.log_ | Output from the initial bootstrapping script that runs on _Arc-Data-Client_. |
-| _C:\Temp\DataServicesLogonScript.log_ | Output of _DataServicesLogonScript.ps1_ which configures Azure Arc-enabled data services baseline capability. |
-| _C:\Temp\DeploySQLMI.log_ | Output of _deploySQL.ps1_ which deploys and configures SQL Managed Instance with Azure Arc. |
-| _C:\Temp\installKubeadm.log_ | Output from the custom script extension which runs on _Arc-Data-Kubeadm-MGMT-Master_ and configures the Kubeadm cluster Master Node. If you encounter ARM deployment issues with _ubuntuKubeadm.json_ then review this log. |
-| _C:\Temp\installKubeadmWorker.log_ | Output from the custom script extension which runs on _Arc-Data-Kubeadm-MGMT-Worker_ and configures the Kubeadm cluster Worker Node. If you encounter ARM deployment issues with _ubuntuKubeadm.json_ then review this log. |
-| _C:\Temp\SQLMIEndpoints.log_ | Output from _SQLMIEndpoints.ps1_ which collects the service endpoints for SQL MI and uses them to configure Azure Data Studio connection settings. |
-
-![Screenshot showing the Temp folder with deployment logs](./46.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Screenshot showing Azure resource group deletion](./47.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_postgresql_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_postgresql_arm_template/_index.md
deleted file mode 100644
index e6157344ff..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_postgresql_arm_template/_index.md
+++ /dev/null
@@ -1,298 +0,0 @@
----
-type: docs
-title: "PostgreSQL ARM Template"
-linkTitle: "PostgreSQL ARM Template"
-weight: 3
-description: >
----
-
-## Deploy Azure Arc-enabled PostgreSQL in directly connected mode on Kubeadm Kubernetes cluster with Azure provider using an ARM Template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [PostgreSQL](https://docs.microsoft.com/azure/azure-arc/data/what-is-azure-arc-enabled-postgres-hyperscale) deployed on [Kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/) Kubernetes cluster.
-
-By the end of this scenario, you will have a Kubeadm Kubernetes cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure client VM, installed & pre-configured with all the tools required to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned the following Role-based access control (RBAC) role is required:
-
- - "Owner" - Required for provisioning Azure resources, interact with Azure Arc-enabled data services billing, monitoring metrics, and logs management and creating role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
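-
-  As a sketch of that recommendation, the service principal could be scoped to a pre-created resource group rather than the whole subscription (the group and service principal names below are illustrative):
-
-  ```shell
-  # Hypothetical example: scope the "Owner" assignment to a single resource group.
-  az group create --name Arc-Data-Demo --location "East US"
-  az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" \
-    --scopes /subscriptions/$subscriptionId/resourceGroups/Arc-Data-Demo
-  ```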
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation of each step.
-
-- The user edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_ ARM template](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json) will initiate the deployment of the linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/VNET.json) - Deploys a VNET and Subnet for Client and K8s VMs.
-  - [_ubuntuKubeadm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/ubuntuKubeadm.json) - Deploys two Ubuntu Linux VMs which will be transformed into a Kubeadm management cluster (a single control-plane node and a single worker node) using the [_installKubeadm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/installKubeadm.sh) and the [_installKubeadmWorker_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/installKubeadmWorker.sh) shell scripts. This Kubeadm cluster will be used by the rest of the Azure Arc-enabled data services automation deployment.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/clientVm.json) - Deploys the client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/mgmtStagingStorage.json) - Used for staging files in automation scripts.
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs uploads.
-
-- User remotes into client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell script that creates a new Azure Arc-enabled Kubernetes cluster and configures Azure Arc-enabled data services on the Kubeadm workload cluster, including the Data Controller. Azure Arc-enabled data services deployed in directly connected mode use this resource type to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as to use Azure Arc [Custom Locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
-- In addition to deploying the data controller and PostgreSQL, the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you as well.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key
- - _`spnClientId`_ - Your Azure service principal id
- - _`spnClientSecret`_ - Your Azure service principal secret
- - _`spnTenantId`_ - Your Azure tenant id
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for the deployment log analytics workspace.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this scenario we leave it set to _**false**_.
- - _`SQLMIHA`_ - Boolean that sets whether or not to deploy SQL Managed Instance with high-availability (business continuity) configurations, for this scenario we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this Azure Arc-enabled PostgreSQL scenario we will set it to _**true**_.
-  - _`deployBastion`_ - Choice (true | false) whether to deploy Azure Bastion for connecting to the client VM.
- - _`bastionHostName`_ - Azure Bastion host name.
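-
-  For orientation, a trimmed sketch of what the filled-in parameters file for this scenario might look like (all values are illustrative):
-
-  ```json
-  {
-    "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
-    "contentVersion": "1.0.0.0",
-    "parameters": {
-      "sshRSAPublicKey": { "value": "ssh-rsa AAAA...== user@pc" },
-      "spnClientId": { "value": "<your-service-principal-app-id>" },
-      "spnClientSecret": { "value": "<your-service-principal-secret>" },
-      "spnTenantId": { "value": "<your-tenant-id>" },
-      "windowsAdminUsername": { "value": "arcdemo" },
-      "windowsAdminPassword": { "value": "<a-12-to-123-character-password>" },
-      "logAnalyticsWorkspaceName": { "value": "Arc-Data-Demo-Workspace" },
-      "deploySQLMI": { "value": false },
-      "SQLMIHA": { "value": false },
-      "deployPostgreSQL": { "value": true },
-      "deployBastion": { "value": false },
-      "bastionHostName": { "value": "Arc-Data-Demo-Bastion" }
-    }
-  }
-  ```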
-
-- You will also need to get the Azure Custom Location Resource Provider (RP) Object ID (OID) and export it as an environment variable. This is required to enable [Custom Location](https://learn.microsoft.com/azure/azure-arc/platform/conceptual-custom-locations) on your cluster.
-
-  > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/kubeadm/azure/ARM) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-    --resource-group <resource-group-name> \
-    --name <deployment-name> \
-    --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
-    --parameters <path-to-azuredeploy.parameters.json> \
-    --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the _azuredeploy.parameters.json_ file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Demo \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
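-
-  While the deployment is running, you can optionally poll its state from the CLI (using the example names from above):
-
-  ```shell
-  # Returns "Running" while in progress and "Succeeded" when complete.
-  az deployment group show \
-    --resource-group Arc-Data-Demo \
-    --name arcdatademo \
-    --query properties.provisioningState -o tsv
-  ```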
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal.
-
- ![Screenshot showing ARM template deployment completed](./01.png)
-
- ![Screenshot showing the new Azure resource group with all resources](./02.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_postgresql_arm_template/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_postgresql_arm_template/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/kubeadm/kubeadm_azure_postgresql_arm_template/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client-NSG with blocked RDP](./03.png)
-
- ![Screenshot showing adding a new inbound security rule](./04.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./05.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./06.png)
-
- ![Screenshot showing connecting to the VM using RDP](./07.png)
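-
-  If you prefer to script this step, an equivalent NSG rule can be created from the CLI (a sketch; the source IP address and resource group name are illustrative):
-
-  ```shell
-  az network nsg rule create \
-    --resource-group Arc-Data-Demo \
-    --nsg-name Arc-Data-Client-NSG \
-    --name AllowRDP \
-    --priority 1001 \
-    --direction Inbound \
-    --access Allow \
-    --protocol Tcp \
-    --source-address-prefixes <your-public-ip> \
-    --destination-port-ranges 3389
-  ```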
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./08.png)
-
-  > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore, some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./09.png)
-
- ![Screenshot showing connecting to the VM using JIT](./10.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/kubeadm/azure/ARM/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes. Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and both the Azure Arc Data Controller and PostgreSQL will be deployed on the cluster and ready to use.
-
- ![Screenshot showing the PowerShell logon script run](./11.png)
-
- ![Screenshot showing the PowerShell logon script run](./12.png)
-
- ![Screenshot showing the PowerShell logon script run](./13.png)
-
- ![Screenshot showing the PowerShell logon script run](./14.png)
-
- ![Screenshot showing the PowerShell logon script run](./15.png)
-
- ![Screenshot showing the PowerShell logon script run](./16.png)
-
- ![Screenshot showing the PowerShell logon script run](./17.png)
-
- ![Screenshot showing the PowerShell logon script run](./18.png)
-
- ![Screenshot showing the PowerShell logon script run](./19.png)
-
- ![Screenshot showing the PowerShell logon script run](./20.png)
-
- ![Screenshot showing the PowerShell logon script run](./21.png)
-
- ![Screenshot showing the PowerShell logon script run](./22.png)
-
- ![Screenshot showing the PowerShell logon script run](./23.png)
-
- ![Screenshot showing the post-run desktop](./24.png)
-
-- Since this scenario is onboarding your Kubernetes cluster with Arc and deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group. The important ones to note are:
-
-  - Custom location - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as target locations for deploying Azure service instances.
-
- - Azure Arc Data Controller - The data controller that is now deployed on the Kubernetes cluster.
-
- - Azure Arc-enabled PostgreSQL - The PostgreSQL instance that is now deployed on the Kubernetes cluster.
-
- ![Screenshot showing additional Azure resources in the resource group](./25.png)
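-
-  You can also confirm the deployment from the client VM with _kubectl_ (a sketch assuming the Jumpstart default _arc_ namespace):
-
-  ```shell
-  # The data controller custom resource should report a "Ready" state.
-  kubectl get datacontrollers -n arc
-  kubectl get pods -n arc
-  ```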
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./26.png)
-
- ![Screenshot showing Azure Data Studio extensions](./27.png)
-
-- Additionally, the PostgreSQL connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
-  ![Screenshot showing Azure Data Studio PostgreSQL connection](./28.png)
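-
-  To inspect the PostgreSQL server itself from the client VM, a quick check might look like this (assuming the preview _postgresqls_ custom resource name and the default _arc_ namespace):
-
-  ```shell
-  kubectl get postgresqls -n arc
-  ```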
-
-## Cluster extensions
-
-In this scenario, two Azure Arc-enabled Kubernetes cluster extensions were installed:
-
-- _azuremonitor-containers_ - The Azure Monitor Container Insights cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- _arc-data-services_ - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
-- In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes cluster extensions settings](./29.png)
-
- ![Screenshot showing the Azure Arc-enabled Kubernetes installed extensions](./30.png)
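-
-  The same information is available from the CLI (a sketch; requires the _k8s-extension_ CLI extension and your connected cluster's name):
-
-  ```shell
-  az k8s-extension list \
-    --resource-group Arc-Data-Demo \
-    --cluster-name <your-connected-cluster-name> \
-    --cluster-type connectedClusters \
-    --output table
-  ```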
-
-### Exploring logs from the Client virtual machine
-
-Occasionally, you may need to review log output from scripts that run on the _Arc-Data-Client_, _Arc-Data-Kubeadm-MGMT-Master_ or _Arc-Data-Kubeadm-MGMT-Worker_ virtual machines in case of deployment failures. To make troubleshooting easier, the scenario deployment scripts collect all relevant logs in the _C:\Temp_ folder on _Arc-Data-Client_. A short description of the logs and their purpose can be seen in the list below:
-
-| Logfile | Description |
-| ------- | ----------- |
-| _C:\Temp\Bootstrap.log_ | Output from the initial bootstrapping script that runs on _Arc-Data-Client_. |
-| _C:\Temp\DataServicesLogonScript.log_ | Output of _DataServicesLogonScript.ps1_ which configures Azure Arc-enabled data services baseline capability. |
-| _C:\Temp\DeployPostgreSQL.log_ | Output of _deployPostgreSQL.ps1_ which deploys and configures PostgreSQL with Azure Arc. |
-| _C:\Temp\installKubeadm.log_ | Output from the custom script extension which runs on _Arc-Data-Kubeadm-MGMT-Master_ and configures the Kubeadm cluster Master Node. If you encounter ARM deployment issues with _ubuntuKubeadm.json_ then review this log. |
-| _C:\Temp\installKubeadmWorker.log_ | Output from the custom script extension which runs on _Arc-Data-Kubeadm-MGMT-Worker_ and configures the Kubeadm cluster Worker Node. If you encounter ARM deployment issues with _ubuntuKubeadm.json_ then review this log. |
-
-![Screenshot showing the Temp folder with deployment logs](./31.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployment resource group from the Azure portal.
-
- ![Delete Azure resource group](./32.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/microk8s/_index.md
deleted file mode 100644
index 006d6877db..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "MicroK8s"
-linkTitle: "MicroK8s"
-weight: 6
-description: >-
-  If you do not yet have a Kubernetes cluster, the scenarios in this section will guide you through deploying Azure Arc-enabled data services on Microk8s in an automated fashion using ARM templates.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/dc_vanilla/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/dc_vanilla/_index.md
deleted file mode 100644
index c72dfc8edb..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/dc_vanilla/_index.md
+++ /dev/null
@@ -1,293 +0,0 @@
----
-type: docs
-title: "Data Controller ARM Template"
-linkTitle: "Data Controller ARM Template"
-weight: 1
-description: >
----
-
-## Deploy a vanilla Azure Arc Data Controller on a Microk8s Kubernetes cluster in an Azure VM using ARM template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) deployed on a single-node [Microk8s](https://microk8s.io/) Kubernetes cluster.
-
-By the end of this scenario, you will have a Microk8s Kubernetes cluster deployed with an Azure Arc Data Controller and a Microsoft Windows Server 2022 (Datacenter) Azure Client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned the following Role-based access control (RBAC) role is required:
-
- - "Owner" - Required for provisioning Azure resources, interact with Azure Arc-enabled data services billing, monitoring metrics, logs management and creating role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- SP_OID=$(az ad sp show --id $SP_CLIENT_ID --query id -o tsv)
-
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- ```
-
-  Without the _`--query appId -o tsv`_ filter, the output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the [Microk8s GitHub repo](https://github.com/ubuntu/microk8s):
-
-_"Microk8s is a single-package, fully conformant, lightweight Kubernetes that works on 42 flavors of Linux. Perfect for Developer workstations, IoT, Edge & CI/CD. MicroK8s tracks upstream and releases beta, RC and final bits the same day as upstream K8s."_
-
-In this scenario, we automate the installation of Microk8s on an Ubuntu 20.04 VM running in Azure using a few simple commands to install it from the [Snap Store](https://snapcraft.io/microk8s), before proceeding to onboard it as an Azure Arc-enabled Kubernetes cluster.
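-
-For reference, the manual equivalent of what the automation performs on that VM is roughly the following (a simplified sketch; the actual steps are driven by the deployment scripts):
-
-```shell
-# Install MicroK8s from the Snap Store and wait until the node is ready.
-sudo snap install microk8s --classic
-microk8s status --wait-ready
-# Enable the add-ons a data services deployment typically needs.
-microk8s enable dns storage
-```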
-
-Once our K8s Cluster is onboarded, we proceed to create a [Custom Location](https://docs.microsoft.com/azure/azure-arc/kubernetes/custom-locations), and deploy an Azure Arc Data Controller in [Directly Connected mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity#connectivity-modes).
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation of each step.
-
-- The user edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json) ARM template will initiate **five** linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/VNET.json) - Deploys a Virtual Network with a single subnet - used by our VMs.
- - [_ubuntuMicrok8s_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/ubuntuMicrok8s.json) - Deploys an Ubuntu Linux VM which will have Microk8s installed from the Snap Store.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/clientVm.json) - Deploys the Client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/mgmtStagingStorage.json) - Used for staging files in automation scripts and [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/).
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs upload.
-
-- User remotes into Client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the Microk8s Kubernetes cluster, including the data controller.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.parameters.example.json).
-
- - _`sshRSAPublicKey`_ - Your SSH public key - sample syntax: `ssh-rsa AAAAB3N...NDOCE7U3DLBISw==\n`.
- - _`spnClientId`_ - Your Azure service principal id.
- - _`spnClientSecret`_ - Your Azure service principal secret.
- - _`spnTenantId`_ - Your Azure tenant id.
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name.
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for log analytics workspace deployment.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this data controller only scenario we leave it set to _**false**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this data controller only scenario we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [_microsoft/azure_arc_](https://github.com/microsoft/azure_arc) GitHub repository, but you can point this to your forked repo as well.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion.
-
-- You will also need to get the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) and export it as an environment variable:
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/microk8s/azure/arm_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-    --resource-group <resource-group-name> \
-    --name <deployment-name> \
-    --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json \
-    --parameters <path-to-azuredeploy.parameters.json> \
-    --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Microk8s --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Microk8s \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
-    --parameters azuredeploy.parameters.json \
- --parameters templateBaseUrl="https://raw.githubusercontent.com/your--github--handle/azure_arc/microk8s-data/azure_arc_data_jumpstart/microk8s/azure/arm_template/"
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
- ![Screenshot showing deployment time](./01.png)
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **13 various Azure resources deployed**.
-
- ![Screenshot showing ARM template deployment completed](./02.png)
-
- ![Screenshot showing new Azure resource group with all resources](./03.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/dc_vanilla/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/dc_vanilla/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/dc_vanilla/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
-  ![Screenshot showing Arc-Data-Client NSG with blocked RDP](./04.png)
-
- ![Screenshot showing adding a new inbound security rule](./05.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./06.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./07.png)
-
- ![Screenshot showing connecting to the VM using RDP](./08.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./09.png)
-
-  > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore, some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./10.png)
-
- ![Screenshot showing connecting to the VM using JIT](./11.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes.
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
-  Once the script finishes its run, the logon script PowerShell session will close, the Windows wallpaper will change, and the Azure Arc Data Controller will have been deployed on the cluster and be ready for use:
-
- ![Screenshot showing Wallpaper Change](./22.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **16 various Azure resources deployed**). The important ones to note are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled data services deployed in directly connected mode is using this resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - Provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - **Azure Arc Data Controller** - The data controller that is now deployed on the Kubernetes cluster.
-
-  ![Screenshot showing additional Azure resources in the resource group](./23.png)
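-
-  To verify the cluster's Arc connection from the CLI, something like the following works (a sketch; the connected cluster name is illustrative):
-
-  ```shell
-  # "Connected" indicates a healthy Arc agent connection.
-  az connectedk8s show \
-    --resource-group Arc-Data-Microk8s \
-    --name <your-connected-cluster-name> \
-    --query connectivityStatus -o tsv
-  ```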
-
-- Another tool automatically deployed is Azure Data Studio, along with the _Azure Data CLI_, the _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./24.png)
-
-  ![Screenshot showing Azure Data Studio extensions](./25.png)
-
-## Cluster extensions
-
-In this scenario, **three** Azure Arc-enabled Kubernetes cluster extensions were deployed:
-
-- `microsoft.azuredefender.kubernetes` - The Azure Defender cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Defender with Cluster API as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/cluster_api/cluster_api_defender_extension/#create-azure-defender-extensions-instance) scenario.
-
-- `azuremonitor-containers` - The Azure Monitor for containers cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- `arc-data-services` - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
- In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing Azure Arc-enabled Kubernetes resource](./26.png)
-
-  And we see the installed extensions:
-
-  ![Screenshot showing Azure Arc-enabled Kubernetes Cluster Extensions settings](./27.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing how to delete Azure resource group](./28.png)
-
-
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/mssql_mi/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/mssql_mi/_index.md
deleted file mode 100644
index 9b207fa859..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/mssql_mi/_index.md
+++ /dev/null
@@ -1,359 +0,0 @@
----
-type: docs
-title: "SQL Managed Instance ARM Template"
-linkTitle: "SQL Managed Instance ARM Template"
-weight: 2
-description: >
----
-
-## Deploy Azure Arc-enabled SQL Managed Instance on a Microk8s Kubernetes cluster in an Azure VM using ARM template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [SQL Managed Instance](https://docs.microsoft.com/azure/azure-arc/data/managed-instance-overview) deployed on a single-node [Microk8s](https://microk8s.io/) Kubernetes cluster.
-
-By the end of this scenario, you will have a Microk8s Kubernetes cluster deployed with an Azure Arc Data Controller & SQL Managed Instance (with a sample database), and a Microsoft Windows Server 2022 (Datacenter) Azure Client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
-  - In Windows (CMD/PowerShell), use the SSH public key file that, by default, is located at _`C:\Users\WINUSER/.ssh/id_rsa.pub`_.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal assigned the following Role-based access control (RBAC) role is required:
-
- - "Owner" - Required for provisioning Azure resources, interact with Azure Arc-enabled data services billing, monitoring metrics, logs management and creating role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, log in to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- SP_OID=$(az ad sp show --id $SP_CLIENT_ID --query id -o tsv)
-
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- ```
-
-  Without the _`--query appId -o tsv`_ filter, the output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in mind while adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as to consider using a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the [Microk8s GitHub repo](https://github.com/ubuntu/microk8s):
-
-_"Microk8s is a single-package, fully conformant, lightweight Kubernetes that works on 42 flavors of Linux. Perfect for Developer workstations, IoT, Edge & CI/CD. MicroK8s tracks upstream and releases beta, RC and final bits the same day as upstream K8s."_
-
-In this scenario, we automate the installation of Microk8s on an Ubuntu 20.04 VM running in Azure using a few simple commands to install it from the [Snap Store](https://snapcraft.io/microk8s), before proceeding to onboard it as an Azure Arc-enabled Kubernetes cluster.
-
-Once our K8s Cluster is onboarded, we proceed to create a [Custom Location](https://docs.microsoft.com/azure/azure-arc/kubernetes/custom-locations), and deploy an Azure Arc Data Controller in [Directly Connected mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity#connectivity-modes).
-
-## Automation Flow
-
-To get familiar with the automation and deployment flow, below is an explanation of each step.
-
-- The user edits the ARM template parameters file (one-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json) ARM template will initiate **five** linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/VNET.json) - Deploys a Virtual Network with a single subnet - used by our VMs.
- - [_ubuntuMicrok8s_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/ubuntuMicrok8s.json) - Deploys an Ubuntu Linux VM which will have Microk8s installed from the Snap Store.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/clientVm.json) - Deploys the Client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/mgmtStagingStorage.json) - Used for staging files in automation scripts and [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/).
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs upload.
-
-- User remotes into Client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the Microk8s Kubernetes cluster, including the data controller.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.parameters.example.json) (make sure to set _`deploySQLMI`_ to _**true**_):
-
- - _`sshRSAPublicKey`_ - Your SSH public key - sample syntax: `ssh-rsa AAAAB3N...NDOCE7U3DLBISw==\n`.
- - _`spnClientId`_ - Your Azure service principal id.
- - _`spnClientSecret`_ - Your Azure service principal secret.
- - _`spnTenantId`_ - Your Azure tenant id.
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name.
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for log analytics workspace deployment.
- - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance, for this data controller and Azure SQL Managed Instance scenario, we will set it to _**true**_.
- - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL, for this data controller and Azure SQL Managed Instance scenario, we leave it set to _**false**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [_microsoft/azure_arc_](https://github.com/microsoft/azure_arc) GitHub repository, but you can point this to your forked repo as well.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion.
-
-- You will also need to get the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) and export it as an environment variable:
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/microk8s/azure/arm_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-    --resource-group <resource-group-name> \
-    --name <deployment-name> \
-    --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json \
-    --parameters <path-to-azuredeploy.parameters.json> \
-    --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Microk8s --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Microk8s \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
-    --parameters azuredeploy.parameters.json \
- --parameters templateBaseUrl="https://raw.githubusercontent.com/your--github--handle/azure_arc/microk8s-data/azure_arc_data_jumpstart/microk8s/azure/arm_template/"
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
- ![Screenshot showing deployment time](./01.png)
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **13 various Azure resources deployed**.
-
- ![Screenshot showing ARM template deployment completed](./02.png)
-
- ![Screenshot showing new Azure resource group with all resources](./03.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/mssql_mi/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/mssql_mi/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/mssql_mi/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client NSG with blocked RDP](./04.png)
-
- ![Screenshot showing adding a new inbound security rule](./05.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./06.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./07.png)
-
- ![Screenshot showing connecting to the VM using RDP](./08.png)
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./09.png)
-
-  > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore, some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./10.png)
-
- ![Screenshot showing connecting to the VM using JIT](./11.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; this will be done for you once it completes.
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
- ![Screenshot showing PowerShell logon script run](./22.png)
-
- ![Screenshot showing PowerShell logon script run](./23.png)
-
-  Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and the SQL Managed Instance will be deployed on the cluster and ready to use:
-
- ![Screenshot showing Wallpaper Change](./24.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and SQL Managed Instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **17 various Azure resources deployed**). The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled data services deployed in directly connected mode is using this resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - **Azure Arc Data Controller** - The data controller that is now deployed on the Kubernetes cluster.
-
- - **Azure Arc-enabled SQL Managed Instance** - The SQL Managed Instance that is now deployed on the Kubernetes cluster.
-
-  ![Screenshot showing additional Azure resources in the resource group](./25.png)
-
-- Another tool automatically deployed is Azure Data Studio, along with the _Azure Data CLI_, the _Azure Arc_, and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./26.png)
-
- ![Screenshot showing Azure Data Studio shortcut](./27.png)
-
-- Additionally, the SQL Managed Instance connection will be configured within Data Studio, and the sample [_AdventureWorks_](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms) database will be restored automatically for you.
-
- ![Screenshot showing configured SQL Managed Instance connection](./28.png)
-
-  > **NOTE: Due to the use of the Kubernetes _NodePort_ service in this scenario, the default SQL connection endpoint port number (1433) was changed to 31111.**
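-
-  For example, connecting with _sqlcmd_ from the Client VM would require specifying the non-default port explicitly. This is a minimal sketch; the endpoint IP, credentials, and database name are placeholders:
-
-  ```shell
-  # Connect to the SQL Managed Instance primary endpoint on the NodePort-exposed port
-  sqlcmd -S <primary-endpoint-ip>,31111 -U <username> -P <password> -d <adventureworks-database-name> -Q "SELECT @@VERSION"
-  ```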
-
-## Cluster extensions
-
-In this scenario, **three** Azure Arc-enabled Kubernetes cluster extensions were deployed:
-
-- `microsoft.azuredefender.kubernetes` - The Azure Defender cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Defender with Cluster API as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/cluster_api/cluster_api_defender_extension/#create-azure-defender-extensions-instance) scenario.
-
-- `azuremonitor-containers` - The Azure Monitor for containers cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- `arc-data-services` - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
- In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing Azure Arc-enabled Kubernetes resource](./29.png)
-
-  And we see the installed extensions:
-
-  ![Screenshot showing Azure Arc-enabled Kubernetes Cluster Extensions settings](./30.png)
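-
-  The same extensions can also be listed from the command line. Below is a minimal sketch, assuming the Azure CLI _k8s-extension_ extension is installed; the cluster and resource group names are placeholders:
-
-  ```shell
-  # List all cluster extensions installed on the Arc-enabled Kubernetes cluster
-  az k8s-extension list --cluster-name <cluster-name> --resource-group <resource-group-name> --cluster-type connectedClusters --output table
-  ```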
-
-## Operations
-
-### Azure Arc-enabled SQL Managed Instance Stress Simulation
-
-Included in this scenario is a dedicated SQL stress simulation tool named _SqlQueryStress_, automatically installed for you on the Client VM. _SqlQueryStress_ allows you to generate load on the Azure Arc-enabled SQL Managed Instance, which can be used to showcase how the SQL database and services are performing, as well as to highlight the operational practices described in the next section.
-
-- To start with, open the _SqlQueryStress_ desktop shortcut and connect to the SQL Managed Instance **primary** endpoint IP address. This can be found in the _SQLMI Endpoints_ text file desktop shortcut that was also created for you alongside the username and password you used to deploy the environment.
-
- ![Screenshot showing how to open SqlQueryStress](./31.png)
-
- ![Screenshot showing SQLMI Endpoints text file](./32.png)
-
-- To connect, use "SQL Server Authentication" and select the deployed sample _AdventureWorks_ database (you can use the "Test" button to check the connection).
-
- ![Screenshot showing SqlQueryStress connected](./33.png)
-
-- To generate some load, we will be running a simple stored procedure. Copy the below procedure and change the number of iterations you want it to run, as well as the number of threads, to generate even more load on the database. In addition, change the delay between queries to 1ms to allow the stored procedure to run for a while.
-
- ```sql
- exec [dbo].[uspGetEmployeeManagers] @BusinessEntityID = 8
- ```
-
-- As you can see from the example below, the configuration settings are 100,000 iterations, five threads per iteration, and a 1ms delay between queries. These configurations should allow you to have the stress test running for a while.
-
- ![Screenshot showing SqlQueryStress settings](./34.png)
-
- ![Screenshot showing SqlQueryStress running](./35.png)
-
-### Azure Arc-enabled SQL Managed Instance monitoring using Grafana
-
-When deploying Azure Arc-enabled data services, a [Grafana](https://grafana.com/) instance is also automatically deployed on the same Kubernetes cluster and includes built-in dashboards for both Kubernetes infrastructure and SQL Managed Instance monitoring (PostgreSQL dashboards are included as well, but we will not be covering these in this section).
-
-- Now that you have the _SqlQueryStress_ stored procedure running and generating load, we can look at how this is reflected in the built-in Grafana dashboard. As part of the automation, a new URL desktop shortcut simply named "Grafana" was created.
-
- ![Screenshot showing Grafana desktop shortcut](./36.png)
-
-- To log in, use the same username and password that is in the _SQLMI Endpoints_ text file desktop shortcut.
-
- ![Screenshot showing Grafana username and password](./37.png)
-
-- Navigate to the built-in "SQL Managed Instance Metrics" dashboard.
-
- ![Screenshot showing Grafana dashboards](./38.png)
-
- ![Screenshot showing Grafana "SQL Managed Instance Metrics" dashboard](./39.png)
-
-- Change the dashboard time range to "Last 5 minutes" and re-run the stress test using _SqlQueryStress_ (in case it was already finished).
-
- ![Screenshot showing last 5 minutes time range](./40.png)
-
-- You can now see how the SQL graphs are starting to show increased activity and load on the database instance.
-
- ![Screenshot showing increased load activity](./41.png)
-
- ![Screenshot showing increased load activity](./42.png)
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing how to delete Azure resource group](./43.png)
-
-
diff --git a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/postgresql/_index.md b/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/postgresql/_index.md
deleted file mode 100644
index 14d62dbc48..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/postgresql/_index.md
+++ /dev/null
@@ -1,305 +0,0 @@
----
-type: docs
-title: "PostgreSQL ARM Template"
-linkTitle: "PostgreSQL ARM Template"
-weight: 3
-description: >
----
-
-## Deploy Azure Arc-enabled PostgreSQL on a Microk8s Kubernetes cluster in an Azure VM using ARM template
-
-The following Jumpstart scenario will guide you on how to deploy a "Ready to Go" environment so you can start using [Azure Arc-enabled data services](https://docs.microsoft.com/azure/azure-arc/data/overview) and [PostgreSQL](https://docs.microsoft.com/azure/azure-arc/data/what-is-azure-arc-enabled-postgres-hyperscale) deployed on a single-node [Microk8s](https://microk8s.io/) Kubernetes cluster.
-
-By the end of this scenario, you will have a Microk8s Kubernetes cluster deployed with an Azure Arc Data Controller & PostgreSQL instance (with a sample database), and a Microsoft Windows Server 2022 (Datacenter) Azure Client VM, installed & pre-configured with all the required tools needed to work with Azure Arc-enabled data services.
-
-> **NOTE: Currently, Azure Arc-enabled data services with PostgreSQL is in [public preview](https://docs.microsoft.com/azure/azure-arc/data/release-notes)**.
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that by default, is located in the _`C:\Users\WINUSER/.ssh/id_rsa.pub`_ folder.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP). To deploy this scenario, an Azure service principal with the following Role-based access control (RBAC) role assignment is required:
-
-  - "Owner" - Required for provisioning Azure resources, interacting with Azure Arc-enabled data services billing, monitoring metrics and logs management, and creating the role assignment for the Monitoring Metrics Publisher role.
-
-  To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- SP_OID=$(az ad sp show --id $SP_CLIENT_ID --query id -o tsv)
-
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- SP_CLIENT_ID=$(az ad sp create-for-rbac -n "JumpstartArcDataSvc" --role "Owner" --scopes /subscriptions/$subscriptionId --query appId -o tsv)
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcDataSvc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering the use of a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Architecture (In a nutshell)
-
-From the [Microk8s GitHub repo](https://github.com/ubuntu/microk8s):
-
-_"Microk8s is a single-package, fully conformant, lightweight Kubernetes that works on 42 flavors of Linux. Perfect for Developer workstations, IoT, Edge & CI/CD. MicroK8s tracks upstream and releases beta, RC and final bits the same day as upstream K8s."_
-
-In this scenario, we automate the installation of Microk8s on an Ubuntu 20.04 VM running on Azure, using a few simple commands to install it from the [Snap Store](https://snapcraft.io/microk8s), before proceeding to onboard it as an Azure Arc-enabled Kubernetes cluster.
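-
-For reference, the core of that installation boils down to a couple of snap commands. This is a minimal sketch; the automation may pin a specific channel or apply additional configuration:
-
-```shell
-# Install Microk8s from the Snap Store and wait for the cluster to come up
-sudo snap install microk8s --classic
-sudo microk8s status --wait-ready
-```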
-
-Once our K8s Cluster is onboarded, we proceed to create a [Custom Location](https://docs.microsoft.com/azure/azure-arc/kubernetes/custom-locations), and deploy an Azure Arc Data Controller in [Directly Connected mode](https://docs.microsoft.com/azure/azure-arc/data/connectivity#connectivity-modes).
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the ARM template parameters file (1-time edit) and exports the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) variable to use it as a parameter. These parameter values are used throughout the deployment.
-
-- Main [_azuredeploy_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json) ARM template will initiate **five** linked ARM templates:
-
- - [_VNET_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/VNET.json) - Deploys a Virtual Network with a single subnet - used by our VMs.
- - [_ubuntuMicrok8s_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/ubuntuMicrok8s.json) - Deploys an Ubuntu Linux VM which will have Microk8s installed from the Snap Store.
- - [_clientVm_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/clientVm.json) - Deploys the Client Windows VM. This is where all user interactions with the environment are made from.
- - [_mgmtStagingStorage_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/mgmtStagingStorage.json) - Used for staging files in automation scripts and [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/).
- - [_logAnalytics_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/logAnalytics.json) - Deploys Azure Log Analytics workspace to support Azure Arc-enabled data services logs upload.
-
-- User remotes into Client Windows VM, which automatically kicks off the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/DataServicesLogonScript.ps1) PowerShell script that deploys and configures Azure Arc-enabled data services on the Microk8s Kubernetes cluster - including the data controller.
-
-## Deployment
-
-As mentioned, this deployment will leverage ARM templates. You will deploy a single template that will initiate the entire automation for this scenario.
-
-- The deployment is using the ARM template parameters file. Before initiating the deployment, edit the [_azuredeploy.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.parameters.example.json), and a minimal sketch is shown after the list below (ensure to set _`deployPostgreSQL`_ to _**true**_):
-
- - _`sshRSAPublicKey`_ - Your SSH public key - sample syntax: `ssh-rsa AAAAB3N...NDOCE7U3DLBISw==\n`.
- - _`spnClientId`_ - Your Azure service principal id.
- - _`spnClientSecret`_ - Your Azure service principal secret.
- - _`spnTenantId`_ - Your Azure tenant id.
- - _`windowsAdminUsername`_ - Client Windows VM Administrator name.
- - _`windowsAdminPassword`_ - Client Windows VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`logAnalyticsWorkspaceName`_ - Unique name for log analytics workspace deployment.
-  - _`deploySQLMI`_ - Boolean that sets whether or not to deploy SQL Managed Instance. For this data controller and PostgreSQL scenario, we will set it to _**false**_.
-  - _`deployPostgreSQL`_ - Boolean that sets whether or not to deploy PostgreSQL. For this data controller and PostgreSQL scenario, we leave it set to _**true**_.
- - _`templateBaseUrl`_ - GitHub URL to the deployment template - filled in by default to point to [_microsoft/azure_arc_](https://github.com/microsoft/azure_arc) GitHub repository, but you can point this to your forked repo as well.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion.
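-
-  A minimal sketch of what the edited file might look like (all values below are placeholders rather than working credentials; _`templateBaseUrl`_ is omitted since it is filled in by default):
-
-  ```json
-  {
-    "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
-    "contentVersion": "1.0.0.0",
-    "parameters": {
-      "sshRSAPublicKey": { "value": "ssh-rsa AAAAB3N...NDOCE7U3DLBISw==" },
-      "spnClientId": { "value": "<your-service-principal-app-id>" },
-      "spnClientSecret": { "value": "<your-service-principal-secret>" },
-      "spnTenantId": { "value": "<your-tenant-id>" },
-      "windowsAdminUsername": { "value": "arcdemo" },
-      "windowsAdminPassword": { "value": "<your-complex-password>" },
-      "logAnalyticsWorkspaceName": { "value": "<unique-workspace-name>" },
-      "deploySQLMI": { "value": false },
-      "deployPostgreSQL": { "value": true },
-      "deployBastion": { "value": false }
-    }
-  }
-  ```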
-
-- You will also need to get the Azure Custom Location Resource Provider ([RP](https://learn.microsoft.com/azure/azure-resource-manager/management/resource-providers-and-types)) Object ID (OID) and export it as an environment variable:
-
- > **NOTE: You need permissions to list all the service principals.**
-
- #### Option 1: Bash
-
- ```bash
- customLocationRPOID=$(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
- #### Option 2: PowerShell
-
- ```powershell
- $customLocationRPOID=(az ad sp list --filter "displayname eq 'Custom Locations RP'" --query "[?appDisplayName=='Custom Locations RP'].id" -o tsv)
- ```
-
-- To deploy the ARM template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_data_jumpstart/microk8s/azure/arm_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json \
-  --parameters <azuredeploy.parameters.json file path> \
-  --parameters customLocationRPOID="$customLocationRPOID"
- ```
-
- > **NOTE: Make sure that you are using the same Azure resource group name as the one you've just used in the `azuredeploy.parameters.json` file**
-
- For example:
-
- ```shell
- az group create --name Arc-Data-Microk8s --location "East US"
- az deployment group create \
- --resource-group Arc-Data-Microk8s \
- --name arcdatademo \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/azuredeploy.json \
- --parameters customLocationRPOID="$customLocationRPOID" \
-  --parameters azuredeploy.parameters.json \
-  --parameters templateBaseUrl="https://raw.githubusercontent.com/your--github--handle/azure_arc/microk8s-data/azure_arc_data_jumpstart/microk8s/azure/arm_template/"
- ```
-
- > **NOTE: The deployment time for this scenario can take ~15-20min**
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
- ![Screenshot showing deployment time](./01.png)
-
-- Once the Azure resources have been provisioned, you will be able to see them in the Azure portal. At this point, the resource group should have **13 various Azure resources deployed**.
-
- ![Screenshot showing ARM template deployment completed](./02.png)
-
- ![Screenshot showing New Azure resource group with all resources](./03.png)
-
-## Windows Login & Post Deployment
-
-Various options are available to connect to _Arc-Data-Client_ VM, depending on the parameters you supplied during deployment.
-
-- [RDP](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/postgresql/#connecting-directly-with-rdp) - available after configuring access to port 3389 on the _Arc-Data-Client-NSG_, or by enabling [Just-in-Time access (JIT)](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/postgresql/#connect-using-just-in-time-access-jit).
-- [Azure Bastion](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_data/microk8s/azure/arm_template/postgresql/#connect-using-azure-bastion) - available if ```true``` was the value of your _`deployBastion`_ parameter during deployment.
-
-### Connecting directly with RDP
-
-By design, port 3389 is not allowed on the network security group. Therefore, you must create an NSG rule to allow inbound 3389.
-
-- Open the _Arc-Data-Client-NSG_ resource in Azure portal and click "Add" to add a new rule.
-
- ![Screenshot showing Arc-Data-Client NSG with blocked RDP](./04.png)
-
- ![Screenshot showing adding a new inbound security rule](./05.png)
-
-- Specify the IP address that you will be connecting from and select RDP as the service with "Allow" set as the action. You can retrieve your public IP address by accessing [https://icanhazip.com](https://icanhazip.com) or [https://whatismyip.com](https://whatismyip.com).
-
- ![Screenshot showing all inbound security rule](./06.png)
-
- ![Screenshot showing all NSG rules after opening RDP](./07.png)
-
- ![Screenshot showing connecting to the VM using RDP](./08.png)
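-
-As mentioned above, you can also retrieve your current public IP address from the command line (the `-4` flag, forcing IPv4, is an assumption; adjust as needed):
-
-```shell
-curl -4 https://icanhazip.com
-```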
-
-### Connect using Azure Bastion
-
-- If you have chosen to deploy Azure Bastion in your deployment, use it to connect to the VM.
-
- ![Screenshot showing connecting to the VM using Bastion](./09.png)
-
- > **NOTE: When using Azure Bastion, the desktop background image is not visible. Therefore some screenshots in this guide may not exactly match your experience if you are connecting with Azure Bastion.**
-
-### Connect using just-in-time access (JIT)
-
-If you already have [Microsoft Defender for Cloud](https://docs.microsoft.com/azure/defender-for-cloud/just-in-time-access-usage?tabs=jit-config-asc%2Cjit-request-asc) enabled on your subscription and would like to use JIT to access the Client VM, use the following steps:
-
-- In the Client VM configuration pane, enable just-in-time. This will enable the default settings.
-
- ![Screenshot showing the Microsoft Defender for cloud portal, allowing RDP on the client VM](./10.png)
-
- ![Screenshot showing connecting to the VM using JIT](./11.png)
-
-### Post Deployment
-
-- At first login, as mentioned in the "Automation Flow" section above, the [_DataServicesLogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_data_jumpstart/microk8s/azure/arm_template/artifacts/DataServicesLogonScript.ps1) PowerShell logon script will start its run.
-
-- Let the script run its course and **do not close** the PowerShell session; it will be closed for you automatically once the run is completed.
-
- ![Screenshot showing PowerShell logon script run](./12.png)
-
- ![Screenshot showing PowerShell logon script run](./13.png)
-
- ![Screenshot showing PowerShell logon script run](./14.png)
-
- ![Screenshot showing PowerShell logon script run](./15.png)
-
- ![Screenshot showing PowerShell logon script run](./16.png)
-
- ![Screenshot showing PowerShell logon script run](./17.png)
-
- ![Screenshot showing PowerShell logon script run](./18.png)
-
- ![Screenshot showing PowerShell logon script run](./19.png)
-
- ![Screenshot showing PowerShell logon script run](./20.png)
-
- ![Screenshot showing PowerShell logon script run](./21.png)
-
- ![Screenshot showing PowerShell logon script run](./22.png)
-
- ![Screenshot showing PowerShell logon script run](./23.png)
-
-  Once the script finishes its run, the logon script PowerShell session will be closed, the Windows wallpaper will change, and both the Azure Arc Data Controller and the PostgreSQL instance will be deployed on the cluster and ready to use:
-
- ![Screenshot showing Wallpaper Change](./24.png)
-
-- Since this scenario is deploying the Azure Arc Data Controller and PostgreSQL instance, you will also notice additional newly deployed Azure resources in the resource group (at this point you should have **17 various Azure resources deployed**). The important ones to notice are:
-
- - **Azure Arc-enabled Kubernetes cluster** - Azure Arc-enabled data services deployed in directly connected mode is using this resource to deploy the data services [cluster extension](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-extensions), as well as using Azure Arc [Custom locations](https://docs.microsoft.com/azure/azure-arc/kubernetes/conceptual-custom-locations).
-
- - **Custom location** - provides a way for tenant administrators to use their Azure Arc-enabled Kubernetes clusters as a target location for deploying Azure services.
-
- - **Azure Arc Data Controller** - The data controller that is now deployed on the Kubernetes cluster.
-
-  - **Azure Arc-enabled PostgreSQL** - The PostgreSQL instance that is now deployed on the Kubernetes cluster.
-
-  ![Screenshot showing additional Azure resources in the resource group](./25.png)
-
-- As part of the automation, Azure Data Studio is installed along with the _Azure Data CLI_, _Azure CLI_, _Azure Arc_ and the _PostgreSQL_ extensions. Using the Desktop shortcut created for you, open Azure Data Studio and click the Extensions settings to see the installed extensions.
-
- ![Screenshot showing Azure Data Studio shortcut](./26.png)
-
- ![Screenshot showing Azure Data Studio extensions](./27.png)
-
-- Additionally, the PostgreSQL connection will be configured automatically for you. As mentioned, the sample _AdventureWorks_ database was restored as part of the automation.
-
- ![Screenshot showing Azure Data Studio PostgreSQL connection](./28.png)
-
- > **NOTE: Due to the use of Kubernetes _NodePort_ service in this scenario, the default PostgreSQL connection endpoint port number (5432) was changed to 31111.**
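-
-  For example, connecting with _psql_ would require specifying the non-default port explicitly. This is a minimal sketch; the endpoint IP, user, and database name are placeholders:
-
-  ```shell
-  # Connect to the Arc-enabled PostgreSQL endpoint on the NodePort-exposed port
-  psql "host=<postgresql-endpoint-ip> port=31111 user=<username> dbname=<database-name> sslmode=require"
-  ```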
-
-## Cluster extensions
-
-In this scenario, **three** Azure Arc-enabled Kubernetes cluster extensions were deployed:
-
-- `microsoft.azuredefender.kubernetes` - The Azure Defender cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Defender with Cluster API as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/cluster_api/cluster_api_defender_extension/#create-azure-defender-extensions-instance) scenario.
-
-- `azuremonitor-containers` - The Azure Monitor for containers cluster extension. To learn more about it, you can check our Jumpstart ["Integrate Azure Monitor for Containers with GKE as an Azure Arc Connected Cluster using Kubernetes extensions"](https://azurearcjumpstart.io/azure_arc_jumpstart/azure_arc_k8s/day2/gke/gke_monitor_extension/) scenario.
-
-- `arc-data-services` - The Azure Arc-enabled data services cluster extension that was used throughout this scenario in order to deploy the data services infrastructure.
-
- In order to view these cluster extensions, click on the Azure Arc-enabled Kubernetes resource Extensions settings.
-
- ![Screenshot showing Azure Arc-enabled Kubernetes resource](./29.png)
-
-  And we see the installed extensions:
-
-  ![Screenshot showing Azure Arc-enabled Kubernetes Cluster Extensions settings](./30.png)
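-
-  On the cluster itself, you can also inspect the agent pods that back the Arc connection with kubectl (a sketch; the data services extension pods typically run in their own namespace, which may differ):
-
-  ```shell
-  # List the Azure Arc agent pods running on the cluster
-  kubectl get pods -n azure-arc
-  ```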
-
-## Cleanup
-
-- If you want to delete the entire environment, simply delete the deployed resource group from the Azure portal.
-
- ![Screenshot showing how to delete Azure resource group](./31.png)
-
-
diff --git a/docs/azure_arc_jumpstart/azure_arc_k8s/_index.md b/docs/azure_arc_jumpstart/azure_arc_k8s/_index.md
deleted file mode 100644
index 9c3355609e..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_k8s/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Arc-enabled Kubernetes"
-linkTitle: "Azure Arc-enabled Kubernetes"
-weight: 4
-description: >-
-  The deployment scenarios in this section will guide you through onboarding various Kubernetes distributions as Azure Arc-enabled Kubernetes clusters.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_k8s/aks/_index.md b/docs/azure_arc_jumpstart/azure_arc_k8s/aks/_index.md
deleted file mode 100644
index fd62af122a..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_k8s/aks/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Kubernetes Service (AKS)"
-linkTitle: "Azure Kubernetes Service (AKS)"
-weight: 2
-description: >-
-  If you do not yet have a Kubernetes cluster, the scenarios in this section will guide you on creating an AKS cluster in order to simulate an "on-premises" cluster in an automated fashion using either ARM template or Terraform.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_k8s/aks/aks_arm_template/_index.md b/docs/azure_arc_jumpstart/azure_arc_k8s/aks/aks_arm_template/_index.md
deleted file mode 100644
index 7546b69e91..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_k8s/aks/aks_arm_template/_index.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-type: docs
-title: "AKS cluster ARM template"
-linkTitle: "AKS cluster ARM template"
-weight: 1
-description: >
----
-
-## Deploy AKS cluster and connect it to Azure Arc using an Azure ARM template
-
-The following Jumpstart scenario will guide you on how to use the provided [Azure ARM Template](https://docs.microsoft.com/azure/azure-resource-manager/templates/overview) to deploy an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster and connect it as an Azure Arc cluster resource.
-
- > **NOTE: Since AKS is a 1st-party Azure solution and natively supports capabilities such as [Azure Monitor](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview) integration as well as GitOps configurations, it is not expected for an AKS cluster to be projected as an Azure Arc-enabled Kubernetes cluster. Connecting an Azure Kubernetes Service (AKS) cluster to Azure Arc is only required for running Arc enabled services like App Services and Data Services on the cluster.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Generate a new SSH key pair](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed) or use an existing one (Windows 10 and above now comes with a built-in ssh client).
-
- ```shell
- ssh-keygen -t rsa -b 4096
- ```
-
- To retrieve the SSH public key after it's been created, depending on your environment, use one of the below methods:
- - In Linux, use the `cat ~/.ssh/id_rsa.pub` command.
- - In Windows (CMD/PowerShell), use the SSH public key file that by default, is located in the _`C:\Users\WINUSER/.ssh/id_rsa.pub`_ folder.
-
- SSH public key example output:
-
- ```shell
- ssh-rsa o1djFhyNe5NXyYk7XVF7wOBAAABgQDO/QPJ6IZHujkGRhiI+6s1ngK8V4OK+iBAa15GRQqd7scWgQ1RUSFAAKUxHn2TJPx/Z/IU60aUVmAq/OV9w0RMrZhQkGQz8CHRXc28S156VMPxjk/gRtrVZXfoXMr86W1nRnyZdVwojy2++sqZeP/2c5GoeRbv06NfmHTHYKyXdn0lPALC6i3OLilFEnm46Wo+azmxDuxwi66RNr9iBi6WdIn/zv7tdeE34VAutmsgPMpynt1+vCgChbdZR7uxwi66RNr9iPdMR7gjx3W7dikQEo1djFhyNe5rrejrgjerggjkXyYk7XVF7wOk0t8KYdXvLlIyYyUCk1cOD2P48ArqgfRxPIwepgW78znYuwiEDss6g0qrFKBcl8vtiJE5Vog/EIZP04XpmaVKmAWNCCGFJereRKNFIl7QfSj3ZLT2ZXkXaoLoaMhA71ko6bKBuSq0G5YaMq3stCfyVVSlHs7nzhYsX6aDU6LwM/BTO1c= user@pc
- ```
-
-- Create Azure service principal (SP)
-
-  To be able to complete the scenario and its related automation, an Azure service principal assigned with the “Contributor” role is required. To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcK8s" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcK8s",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering the use of a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Deployment Options and Automation Flow
-
-This Jumpstart scenario provides multiple paths for deploying and configuring resources. Deployment options include:
-
-- Azure portal
-- ARM template via Azure CLI
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-1. User edits the environment variables in the Shell script file (1-time edit), which will then be used throughout the deployment.
-
-2. User uploads the script to Azure Cloud Shell and runs the shell script. The script will:
-
- - Connect to Azure using SPN credentials.
- - Get AKS credentials.
- - Install Azure Arc CLI extensions.
- - Connect the cluster to Azure Arc.
-
-3. User verifies the Arc-enabled Kubernetes cluster.
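-
-For reference, the core of that script boils down to a handful of Azure CLI commands. Below is a minimal sketch; the names, variable wiring, and exact flags in the actual script may differ:
-
-```shell
-# Log in with the service principal created in the prerequisites
-az login --service-principal --username <spn-app-id> --password <spn-secret> --tenant <tenant-id>
-
-# Merge the AKS cluster credentials into the local kubeconfig
-az aks get-credentials --resource-group <resource-group-name> --name <aks-cluster-name>
-
-# Install the Arc-enabled Kubernetes CLI extension and project the cluster into Azure Arc
-az extension add --name connectedk8s
-az connectedk8s connect --name <arc-cluster-name> --resource-group <resource-group-name>
-```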
-
-## Deployment Option 1: Azure portal
-
-- Click the button and enter values for the ARM template parameters.
-
- ![Screenshot showing Azure portal deployment](./01.png)
-
- ![Screenshot showing Azure portal deployment](./02.png)
-
-## Deployment Option 2: ARM template with Azure CLI
-
-- Before deploying the ARM template, determine which AKS Kubernetes versions are available in your region using the below Azure CLI command.
-
- ```shell
-  az aks get-versions -l "<azure-region>"
- ```
-
-- The deployment is using the template parameters file. Before initiating the deployment, edit the [*azuredeploy.parameters.json*](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks/arm_template/azuredeploy.parameters.json) file to match your environment and using one of the available Kubernetes Versions from the previous step.
-
- ![Screenshot of Azure ARM template](./03.png)
-
-- To deploy the ARM template, navigate to the [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_k8s_jumpstart/aks/arm_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
-  --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_k8s_jumpstart/aks/arm_template/azuredeploy.json \
-  --parameters <azuredeploy.parameters.json file path>
- ```
-
- For example:
-
- ```shell
- az group create --name Arc-AKS-Demo --location "East US"
- az deployment group create \
- --resource-group Arc-AKS-Demo \
- --name arcaksdemo01 \
- --template-uri https://raw.githubusercontent.com/microsoft/azure_arc/main/azure_arc_k8s_jumpstart/aks/arm_template/azuredeploy.json \
- --parameters azuredeploy.parameters.json
- ```
-
- > **NOTE: If you receive an error message stating that the requested VM size is not available in the desired location (as an example: 'Standard_D8s_v3'), it means that there is currently a capacity restriction for that specific VM size in that particular region. Capacity restrictions can occur due to various reasons, such as high demand or maintenance activities. Microsoft Azure periodically adjusts the available capacity in each region based on usage patterns and resource availability. To continue deploying this scenario, please try to re-run the deployment using another region.**
-
-- Once the ARM template deployment is completed, a new AKS cluster in a new Azure resource group is created.
-
- ![Screenshot of Azure portal showing AKS resource](./04.png)
-
- ![Screenshot of Azure portal showing AKS resource](./05.png)
-
-## Connecting to Azure Arc
-
-- Now that you have a running AKS cluster, edit the environment variables section in the included [az_connect_aks](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks/arm_template/scripts/az_connect_aks.sh) shell script.
-
- ![Screenshot of az_connect_aks shell script](./06.png)
-
- For example:
-
- ![Screenshot of az_connect_aks shell script](./07.png)
-
-- In order to keep your local environment clean and untouched, we will use [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/overview) (located in the top-right corner of the Azure portal) to run the *az_connect_aks* shell script against the AKS cluster. **Make sure Cloud Shell is configured to use Bash.**
-
- ![Screenshot of Azure Cloud Shell button in Visual Studio Code](./08.png)
-
-- After editing the environment variables in the [*az_connect_aks*](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks/arm_template/scripts/az_connect_aks.sh) shell script to match your parameters, save the file and then upload it to the Cloud Shell environment and run it using the ```. ./az_connect_aks.sh``` command.
-
-  > **NOTE: The extra dot is due to the script having an *export* function that needs to have the vars exported in the same shell session as the other commands.**
-
- ![Screenshot showing upload of file to Cloud Shell](./09.png)
-
- ![Screenshot showing upload of file to Cloud Shell](./10.png)
-
-- Once the script run has finished, the AKS cluster will be projected as a new Azure Arc cluster resource.
-
- ![Screenshot showing Azure portal with Azure Arc-enabled Kubernetes resource](./11.png)
-
- ![Screenshot showing Azure Portal with Azure Arc-enabled Kubernetes resource](./12.png)
-
- ![Screenshot showing Azure Portal with Azure Arc-enabled Kubernetes resource](./13.png)
-
-## Delete the deployment
-
-The most straightforward way is to delete the Azure Arc cluster resource via the Azure portal: just select the cluster and delete it.
-
-![Screenshot showing how to delete Azure Arc-enabled Kubernetes resource](./14.png)
-
-If you want to nuke the entire environment, run the below commands.
-
-```shell
-az deployment group delete --name <deployment-name> --resource-group <resource-group-name>
-```
-
-```shell
-az group delete --name <resource-group-name> --yes
-```
-
-For example:
-
-```shell
-az deployment group delete --name arcaksdemo01 --resource-group Arc-AKS-Demo
-```
-
-```shell
-az group delete --name Arc-AKS-Demo --yes
-```
diff --git a/docs/azure_arc_jumpstart/azure_arc_k8s/aks/aks_terraform/_index.md b/docs/azure_arc_jumpstart/azure_arc_k8s/aks/aks_terraform/_index.md
deleted file mode 100644
index 94f365d9cf..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_k8s/aks/aks_terraform/_index.md
+++ /dev/null
@@ -1,145 +0,0 @@
----
-type: docs
-title: "AKS cluster Terraform plan"
-linkTitle: "AKS cluster Terraform plan"
-weight: 2
-description: >
----
-
-## Deploy AKS cluster and connect it to Azure Arc using Terraform
-
-The following Jumpstart scenario will guide you on how to use the provided [Terraform](https://www.terraform.io/) plan to deploy an [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/azure/aks/intro-kubernetes) cluster and connect it as an Azure Arc-enabled Kubernetes resource.
-
- > **NOTE: Since AKS is a 1st-party Azure solution and natively supports capabilities such as [Azure Monitor](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview) integration as well as GitOps configurations, it is not expected for an AKS cluster to be projected as an Azure Arc-enabled Kubernetes cluster. Connecting an Azure Kubernetes Service (AKS) cluster to Azure Arc is only required for running Arc enabled services like App Services and Data Services on the cluster.**
-
-## Prerequisites
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- [Install Terraform >=1.1.9](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-- Create Azure service principal (SP)
-
-  To be able to complete the scenario and its related automation, an Azure service principal assigned with the “Contributor” role is required. To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArcK8s" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArcK8s",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
- > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with as much ease of use in-mind and adhering to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest), as well as considering the use of a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-1. User edits the tfvars to match the environment.
-2. User runs ```terraform init``` to download the required terraform providers.
-3. User uploads the script to Azure Cloud Shell and runs the shell script. The script will:
-
- - Connect to Azure using SPN credentials.
- - Get AKS credentials.
- - Install Azure Arc CLI extensions.
-   - Connect the cluster to Azure Arc.
-
-4. User verifies the Arc-enabled Kubernetes cluster.
-
-## Deployment
-
-The only thing you need to do before executing the Terraform plan is to create the tfvars file which will be used by the plan. This is based on the Azure service principal you've just created and your subscription.
-
-- Navigate to the [terraform folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_k8s_jumpstart/aks/terraform) and fill in the terraform.tfvars file with the values for your environment.
-
-In addition, validate that the AKS Kubernetes version is available in your region using the below Azure CLI command.
-
-```shell
-az aks get-versions -l "<azure-region>"
-```
-
-In case the selected AKS Kubernetes version is not available in your region, you can change it in the [*variables.tf*](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks/terraform/variables.tf) file by searching for *kubernetes_version*.
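-
-For example, to quickly see the currently pinned version before editing it (run from the terraform folder; the output format depends on how the variable is declared):
-
-```shell
-# Show the kubernetes_version variable declaration and its default value
-grep -A 3 "kubernetes_version" variables.tf
-```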
-
-- Run the ```terraform init``` command which will download the required terraform providers.
-
- ![Screenshot showing terraform init being run](./01.png)
-
-- Run the *`terraform apply --auto-approve`* command and wait for the plan to finish.
-
- Once the Terraform deployment is completed, a new AKS cluster in a new Azure resource group is created.
-
- ![Screenshot showing terraform plan completing](./02.png)
-
- ![Screenshot showing Azure portal with AKS resource](./03.png)
-
- ![Screenshot showing Azure portal with AKS resource](./04.png)
-
-## Connecting to Azure Arc
-
-- Now that you have a running AKS cluster, edit the environment variables section in the included [az_connect_aks](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks/terraform/scripts/az_connect_aks.sh) shell script.
-
- ![Screenshot showing az_connect_aks shell script](./05.png)
-
- For example:
-
- ![Screenshot showing az_connect_aks shell script](./06.png)
-
-- In order to keep your local environment clean and untouched, we will use [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/overview) (located in the top-right corner of the Azure portal) to run the *az_connect_aks* shell script against the AKS cluster. **Make sure Cloud Shell is configured to use Bash.**
-
- ![Screenshot showing how to access Cloud Shell in Visual Studio Code](./07.png)
-
-- Edit the environment variables in the [*az_connect_aks*](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks/terraform/scripts/az_connect_aks.sh) shell script to match your parameters, upload it to the Cloud Shell environment and run it using the *`. ./az_connect_aks.sh`* command.
-
-  > **NOTE: The extra dot is due to the script having an _export_ function that needs to have the vars exported in the same shell session as the rest of the commands.**
-
- ![Screenshot showing Cloud Shell upload functionality](./08.png)
-
- ![Screenshot showing Cloud Shell upload functionality](./09.png)
-
-- Once the script run has finished, the AKS cluster will be projected as a new Azure Arc-enabled Kubernetes resource.
-
- ![Screenshot showing Cloud Shell upload functionality](./10.png)
-
- ![Screenshot showing Azure portal with Azure Arc-enabled resource](./11.png)
-
- ![Screenshot showing Azure portal with Azure Arc-enabled resource](./12.png)
-
-## Delete the deployment
-
-The most straightforward way is to delete the Azure Arc-enabled Kubernetes resource via the Azure portal: just select the cluster and delete it.
-
-![Screenshot showing delete function in Azure portal](./13.png)
-
-If you want to nuke the entire environment, delete the resource group.
-
-![Screenshot showing delete function in Azure portal](./14.png)
diff --git a/docs/azure_arc_jumpstart/azure_arc_k8s/aks_hybrid/_index.md b/docs/azure_arc_jumpstart/azure_arc_k8s/aks_hybrid/_index.md
deleted file mode 100644
index e50fb02d51..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_k8s/aks_hybrid/_index.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-type: docs
-title: "Azure Kubernetes Service (AKS) hybrid"
-linkTitle: "Azure Kubernetes Service (AKS) hybrid"
-weight: 3
-description: >-
-  Azure Kubernetes Service (AKS) hybrid deployment options ("AKS hybrid") is an on-premises implementation of AKS. The scenarios in this section will guide you on creating an AKS hybrid cluster in order to simulate an "on-premises" cluster in an automated fashion.
----
diff --git a/docs/azure_arc_jumpstart/azure_arc_k8s/aks_hybrid/aks_edge_essentials_full/_index.md b/docs/azure_arc_jumpstart/azure_arc_k8s/aks_hybrid/aks_edge_essentials_full/_index.md
deleted file mode 100644
index bcc4c65820..0000000000
--- a/docs/azure_arc_jumpstart/azure_arc_k8s/aks_hybrid/aks_edge_essentials_full/_index.md
+++ /dev/null
@@ -1,240 +0,0 @@
----
-type: docs
-title: "AKS Edge Essentials multi-node deployment"
-linkTitle: "AKS Edge Essentials multi-node deployment"
-weight: 2
-description: >
----
-
-## AKS Edge Essentials multi-node deployment with Azure Arc using Azure Bicep
-
-The following Jumpstart scenario will show how to create an AKS Edge Essentials full deployment with two VMs in Hyper-V nested virtualization in an Azure Windows Server VM, and connect the Hyper-V VMs and the AKS Edge Essentials cluster to Azure Arc using [Azure Bicep](https://learn.microsoft.com/azure/azure-resource-manager/bicep/overview). The provided Azure Bicep templates are responsible for creating the Azure resources, as well as executing the LogonScript on the Azure Windows Server VM, which creates the AKS Edge Essentials cluster and onboards both the Hyper-V VMs and the cluster to Azure Arc.
-
-> **NOTE: Nested virtualization is not expected to be used in a production environment, let alone in an Azure VM. The below scenario is unsupported and should ONLY be used for demo and testing purposes.**
-
-![Architecture diagram](./01.png)
-
-## Prerequisites
-
-- [Install or update Azure CLI to version 2.49.0 and above](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest). Use the below command to check your current installed version.
-
- ```shell
- az --version
- ```
-
-- In case you don't already have one, you can [Create a free Azure account](https://azure.microsoft.com/free/).
-
-- Create Azure service principal (SP)
-
- To complete the scenario and its related automation, an Azure service principal with the “Contributor” role assigned is required. To create it, login to your Azure account and run the below command (this can also be done in [Azure Cloud Shell](https://shell.azure.com/)).
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- For example:
-
- ```shell
- az login
- subscriptionId=$(az account show --query id --output tsv)
- az ad sp create-for-rbac -n "JumpstartArc" --role "Contributor" --scopes /subscriptions/$subscriptionId
- ```
-
- Output should look like this:
-
- ```json
- {
- "appId": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "displayName": "JumpstartArc",
- "password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "tenant": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- }
- ```
-
-  > **NOTE: If you create multiple subsequent role assignments on the same service principal, your client secret (password) will be destroyed and recreated each time. Therefore, make sure you grab the correct password**.
-
-  > **NOTE: The Jumpstart scenarios are designed with ease of use in-mind and adhere to security-related best practices whenever possible. It is optional but highly recommended to scope the service principal to a specific [Azure subscription and resource group](https://docs.microsoft.com/cli/azure/ad/sp?view=azure-cli-latest) as well as considering use of a [less privileged service principal account](https://docs.microsoft.com/azure/role-based-access-control/best-practices)**
-
-## Automation Flow
-
-For you to get familiar with the automation and deployment flow, below is an explanation.
-
-- User edits the Azure Bicep template parameters file (1-time edit). These parameter values are used throughout the deployment.
-
-- Main [_main.bicep_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/main.bicep) will initiate the deployment of the following resources:
-
- - _Virtual Network_ - Virtual Network for Azure Windows Server VM.
- - _Network Interface_ - Network Interface for Azure Windows Server VM.
- - _Network Security Group_ - Network Security Group to allow RDP in Azure Windows Server VM.
- - _Virtual Machine_ - Azure Windows Server VM.
- - _Custom script_ - Configure the Azure Windows Server virtual machine to host the Hyper-V virtual machines that will act as AKS Edge Essentials nodes.
-
-- User remotes into the Azure Windows Server VM, which automatically kicks off the [_LogonScript_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/artifacts/LogonScript.ps1) PowerShell script to create the Hyper-V virtual machines, AKS Edge Essentials cluster, and onboard the Hyper-V VMs and AKS Edge Essentials cluster to Azure Arc.
-
-## Deployment
-
-As mentioned, this deployment will leverage Azure Bicep templates. You will deploy a single template, responsible for creating all the Azure resources in a single resource group, as well as onboarding the created VM to Azure Arc.
-
-- Clone the Azure Arc Jumpstart repository
-
- ```shell
- git clone https://github.com/microsoft/azure_arc.git
- ```
-
-- Before deploying the Azure Bicep template, login to Azure using Azure CLI with the ```az login``` command.
-
-- The deployment uses the Azure Bicep parameters file. Before initiating the deployment, edit the [_main.parameters.json_](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/main.parameters.json) file located in your local cloned repository folder. An example parameters file is located [here](https://github.com/microsoft/azure_arc/blob/main/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template/main.parameters.example.json), and a minimal sketch is shown after the list below.
-
- - _`kubernetesDistribution`_ - Choice (k8s | k3s) kubernetes distribution.
- - _`spnClientId`_ - Your Azure service principal id.
- - _`spnClientSecret`_ - Your Azure service principal secret.
- - _`spnTenantId`_ - Your Azure tenant id.
- - _`windowsAdminUsername`_ - Azure Windows Server VM Administrator name.
- - _`windowsAdminPassword`_ - Azure Windows Server VM Password. Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number, and 1 special character. The value must be between 12 and 123 characters long.
- - _`deployBastion`_ - Choice (true | false) to deploy Azure Bastion or not to connect to the Azure Windows Server VM.
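-
-  A minimal sketch of what the edited file might look like (all values below are placeholders rather than working credentials):
-
-  ```json
-  {
-    "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
-    "contentVersion": "1.0.0.0",
-    "parameters": {
-      "kubernetesDistribution": { "value": "k8s" },
-      "spnClientId": { "value": "<your-service-principal-app-id>" },
-      "spnClientSecret": { "value": "<your-service-principal-secret>" },
-      "spnTenantId": { "value": "<your-tenant-id>" },
-      "windowsAdminUsername": { "value": "arcdemo" },
-      "windowsAdminPassword": { "value": "<your-complex-password>" },
-      "deployBastion": { "value": false }
-    }
-  }
-  ```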
-
-- To deploy the Azure Bicep template, navigate to the local cloned [deployment folder](https://github.com/microsoft/azure_arc/tree/main/azure_arc_k8s_jumpstart/aks_hybrid/aks_edge_essentials_full/bicep_template) and run the below command:
-
- ```shell
-  az group create --name <resource-group-name> --location <azure-region>
-  az deployment group create \
-  --resource-group <resource-group-name> \
-  --name <deployment-name> \
- --template-file