From c3c89124de625053862bd8ed8996eb8b12644ce7 Mon Sep 17 00:00:00 2001 From: Tess Ferrandez Date: Wed, 13 Sep 2023 13:35:54 -0700 Subject: [PATCH] cleaning up links --- .markdown-link-check.json | 24 +++++-- docs/code-reviews/recipes/markdown.md | 2 +- .../save-output-to-variable-group.md | 22 +++---- .../devcontainers/README.md | 6 +- docs/machine-learning/ml-feasibility-study.md | 2 +- .../machine-learning/ml-project-management.md | 4 +- docs/privacy/privacy-frameworks.md | 64 +++++++++---------- docs/user-interface-engineering/README.md | 2 +- 8 files changed, 67 insertions(+), 59 deletions(-) diff --git a/.markdown-link-check.json b/.markdown-link-check.json index 061ac38a8f..e787d09d29 100644 --- a/.markdown-link-check.json +++ b/.markdown-link-check.json @@ -2,34 +2,44 @@ "ignorePatterns": [ {"pattern": "^./INSERT_URL_TO_ISSUE"}, {"pattern": "^./link-to-the-work-item"}, - {"pattern": "^http://link-to-feature-or-story-work-item"}, {"pattern": "^http://link-to-task-work-item"}, {"pattern": "^http://link-to-story-work-item"}, {"pattern": "^http://link-to-work-item"}, + {"pattern": "^http://link-to-feature-or-story-work-item"}, {"pattern": "^https://blog.cloudflare.com/cloudflare-outage/"}, + {"pattern": "https://blog.prototypr.io/software-documentation-types-and-best-practices-1726ca595c7f"}, + {"pattern": "http://link-to-feature-or-story-work-item"}, + {"pattern": "https://www.researchgate.net/publication/301839557_The_landscape_of_software_failure_cause_models" }, + {"pattern": "https://www.inverse.com/innovation/how-companies-have-optimized-the-humble-office-water-cooler"}, + {"pattern": "https://opensource.org/licenses/MIT"}, {"pattern": "^https://gitlab.com/palisade/palisade-release"}, {"pattern": "^https://portal.azure.com"}, - {"pattern": "^https://www.w3.org/blog/2019/12/trace-context-enters-proposed-recommendation/"}, {"pattern": "^https://tanzu.vmware.com/developer/guides/kubernetes/observability-prometheus-grafana-p1"}, - {"pattern": "^https://www.researchgate.net/publication/301839557_The_landscape_of_software_failure_cause_models"}, {"pattern": "^https://www.cmu.edu/iso/governance/guidelines/data-classification.html"}, - {"pattern": "^https://machinelearningmastery.com/how-to-get-baseline-results-and-why-they-matter/"}, + {"pattern": "^https://www.w3.org/blog/2019/12/trace-context-enters-proposed-recommendation/"}, + {"pattern": "^https://www.researchgate.net/publication/301839557_The_landscape_of_software_failure_cause_models"}, {"pattern": "^https://www.ranorex.com/free-trial/"}, + {"pattern": "^https://machinelearningmastery.com/how-to-get-baseline-results-and-why-they-matter/"}, {"pattern": "(.*\\.)?.opentelemetry.io"}, {"pattern": "(.*\\.)?.pluralsight.com"}, {"pattern": "^https://www.github.com"}, {"pattern": "^https://github.com"}, {"pattern": "^https://thanos.io"}, {"pattern": "^https://marketplace.visualstudio.com"}, - {"pattern": "^https://opensource.org/licenses/MIT"}, {"pattern": "^https://www.perfecto.io/"}, {"pattern": "^https://argo-cd.readthedocs.io/"}, {"pattern": "^http://pytest.org/"}, {"pattern": "^http://code.visualstudio.com/"}, {"pattern": "^https://plantuml.com/"}, - {"pattern": "^https://*.medium.com/"}, + {"pattern": "(.*\\.)?.medium.com"}, {"pattern": "^https://medium.com/"}, - {"pattern": "^https://towardsdatascience.com/"} + {"pattern": "^https://towardsdatascience.com/"}, + {"pattern": "^https://machinelearningmastery.com/"}, + {"pattern": "^https://opensource.org/licenses/MIT"}, + {"pattern": "^https://www.linkedin.com/" }, + {"pattern": 
"^http://127.0.0.1:10000"}, + {"pattern": "^https://thenewstack.io/which-programming-languages-use-the-least-electricity/" }, + {"pattern": "^https://www.w3.org/blog/2019/trace-context-enters-proposed-recommendation/" } ], "httpHeaders": [ { diff --git a/docs/code-reviews/recipes/markdown.md b/docs/code-reviews/recipes/markdown.md index 65f8ff2531..bdd1ae0346 100644 --- a/docs/code-reviews/recipes/markdown.md +++ b/docs/code-reviews/recipes/markdown.md @@ -188,7 +188,7 @@ Save your guidelines together with your documentation, so they are easy to refer - Avoid duplication of content, instead link to the `single source of truth` - Link but don't summarize. Summarizing content on another page leads to the content living in two places - Use meaningful anchor texts, e.g. instead of writing `Follow the instructions [here](../recipes/markdown.md)` write `Follow the [Markdown guidelines](../recipes/markdown.md)` -- Make sure links to Microsoft docs (like `https://learn.microsoft.com/something/somethingelse`) do not contain the language marker `/en-us/` or `/fr-fr/`, as this is automatically determined by the site itself. +- Make sure links to Microsoft docs do not contain the language marker `/en-us/` or `/fr-fr/`, as this is automatically determined by the site itself. ### Lists diff --git a/docs/continuous-delivery/recipes/terraform/save-output-to-variable-group.md b/docs/continuous-delivery/recipes/terraform/save-output-to-variable-group.md index 702a944114..743031a776 100644 --- a/docs/continuous-delivery/recipes/terraform/save-output-to-variable-group.md +++ b/docs/continuous-delivery/recipes/terraform/save-output-to-variable-group.md @@ -118,22 +118,22 @@ In addition, you can notice we are also using [predefined variables](https://lea AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) ``` -| System variables | Description | -| -- | -- | -| [System.AccessToken](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken)| Special variable that carries the security token used by the running build. | -| [System.TeamFoundationCollectionUri](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#system-variables-devops-services) | The URI of the Azure DevOps organization. | -| [System.TeamProjectId](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#system-variables-devops-services) | The ID of the project that this build belongs to. | +| System variables | Description | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------| +| [System.AccessToken](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken) | Special variable that carries the security token used by the running build. | +| [System.TeamFoundationCollectionUri](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#system-variables-devops-services) | The URI of the Azure DevOps organization. | +| [System.TeamProjectId](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#system-variables-devops-services) | The ID of the project that this build belongs to. 
| ## Library security Roles are defined for Library items, and membership of these roles governs the operations you can perform on those items. -| Role for library item | Description | -| -- | -- | -| Reader | Can view the item. | -| User | Can use the item when authoring build or release pipelines. For example, you must be a 'User' for a variable group to use it in a release pipeline. | -| Administrator | Can also manage membership of all other roles for the item. The user who created an item gets automatically added to the Administrator role for that item. By default, the following groups get added to the Administrator role of the library: Build Administrators, Release Administrators, and Project Administrators. | -| Creator | Can create new items in the library, but this role doesn't include Reader or User permissions. The Creator role can't manage permissions for other users. | +| Role for library item | Description | +|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Reader | Can view the item. | +| User | Can use the item when authoring build or release pipelines. For example, you must be a 'User' for a variable group to use it in a release pipeline. | +| Administrator | Can also manage membership of all other roles for the item. The user who created an item gets automatically added to the Administrator role for that item. By default, the following groups get added to the Administrator role of the library: Build Administrators, Release Administrators, and Project Administrators. | +| Creator | Can create new items in the library, but this role doesn't include Reader or User permissions. The Creator role can't manage permissions for other users. | When using `System.AccessToken`, service account ` Build Service` identity will be used to access the Library. 
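As an illustrative aside on the `System.AccessToken` usage above: a minimal sketch of a pipeline step that writes a Terraform output back to a variable group with the Azure DevOps CLI. The variable group id, variable name, and Terraform output name (`VariableGroupId`, `appServiceUrl`, `app_service_url`) are hypothetical placeholders, not values from this change, and the build service identity is assumed to hold an appropriate Library role on the variable group (see the roles table above).

```yaml
# Sketch only: assumes the azure-devops CLI extension is available on the agent.
- bash: |
    # Read a (hypothetical) Terraform output and push it into the variable group.
    appUrl=$(terraform output -raw app_service_url)
    az pipelines variable-group variable update \
      --organization "$(System.TeamFoundationCollectionUri)" \
      --project "$(System.TeamProjectId)" \
      --group-id "$(VariableGroupId)" \
      --name appServiceUrl \
      --value "$appUrl"
    # The variable is assumed to already exist in the group; use 'variable create' otherwise.
  displayName: Save Terraform output to variable group
  env:
    AZURE_DEVOPS_EXT_PAT: $(System.AccessToken)  # lets the az devops CLI authenticate as the running build
```

The `env` mapping is what allows the `az pipelines` commands to act as the running build, which is why that identity needs a suitable Library role as described above.
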
diff --git a/docs/continuous-integration/devcontainers/README.md b/docs/continuous-integration/devcontainers/README.md index 9067fcbf7f..dbdea5e97a 100644 --- a/docs/continuous-integration/devcontainers/README.md +++ b/docs/continuous-integration/devcontainers/README.md @@ -21,7 +21,7 @@ Here are below pros and cons for both approaches: ### Run CI pipelines in native environment | Pros | Cons | -| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | +|-------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| | Can use any pipeline tasks available | Need to keep two sets of tooling and their versions in sync | | No container registry | Can take some time to start, based on tools/dependencies required | | Agent will always be up to date with security patches | The dev container should always be built within each run of the CI pipeline, to verify the changes within the branch haven't broken anything | @@ -29,7 +29,7 @@ Here are below pros and cons for both approaches: ### Run CI pipelines in the dev container without image caching | Pros | Cons | -| -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | +|----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------| | Utilities scripts will work out of the box | Need to rebuild the container for each run, given that there may be changes within the branch being built | | Rules used (for linting or unit tests) will be the same on the CI | Not everything in the container is needed for the CI pipeline¹ | | No surprise for the developers, local outputs (of linting for instance) will be the same in the CI | Some pipeline tasks will not be available | @@ -44,7 +44,7 @@ Here are below pros and cons for both approaches: ### Run CI pipelines in the dev container with image registry | Pros | Cons | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------| | Utilities scripts will work out of the box | Need to rebuild the container for each run, given that there may be changes within the branch being built | | No surprise for the developers, local outputs (of linting for instance) will be the same in the CI | Not everything in the container is needed for the CI pipeline¹ | | Rules used (for linting or unit tests) will be the same on the CI | Some 
pipeline tasks will not be available ² | diff --git a/docs/machine-learning/ml-feasibility-study.md b/docs/machine-learning/ml-feasibility-study.md index 3d5f87a2da..a0046ad43c 100644 --- a/docs/machine-learning/ml-feasibility-study.md +++ b/docs/machine-learning/ml-feasibility-study.md @@ -140,5 +140,5 @@ The main outcome is a feasibility study report, with a recommendation on next st * We may look at re-scoping the problem taking into account the findings of the feasibility study * We assess the possibility to collect more data or improve data quality -- If there is enough evidence to support the hypothesis that this problem can be solved using ML +- If there is enough evidence to support the hypothesis that this problem can be solved using ML * Provide recommendations and technical assets for moving to the operationalization phase diff --git a/docs/machine-learning/ml-project-management.md b/docs/machine-learning/ml-project-management.md index c980ebbb7e..f0da2ac95e 100644 --- a/docs/machine-learning/ml-project-management.md +++ b/docs/machine-learning/ml-project-management.md @@ -31,9 +31,7 @@ Within this framework, the team follows these Agile ceremonies: #### Examples of ML deliverables for each sprint - Working code (e.g. models, pipelines, exploratory code) -- Documentation of new hypotheses, and the acceptance or rejection of previous hypotheses as part of a Hypothesis Driven Analysis (HDA). See more resources on HDA here: - 1. [HDA](https://datasciencevademecum.com/2015/11/10/agile-data-science-iteration-0-the-hypothesis-driven-analysis) (from the Data Science Vademecum website). - 2. [Hypothesis Driven Development](https://barryoreilly.com/explore/blog/how-to-implement-hypothesis-driven-development/) (from Barry Oreilly's website). +- Documentation of new hypotheses, and the acceptance or rejection of previous hypotheses as part of a Hypothesis Driven Analysis (HDA). For more information see [Hypothesis Driven Development on Barry Oreilly's website](https://barryoreilly.com/explore/blog/how-to-implement-hypothesis-driven-development/) - Exploratory Data Analysis (EDA) results and learnings documented ## Notes on collaboration between ML team and software development team diff --git a/docs/privacy/privacy-frameworks.md b/docs/privacy/privacy-frameworks.md index c045cb201c..ab6752c8d7 100644 --- a/docs/privacy/privacy-frameworks.md +++ b/docs/privacy/privacy-frameworks.md @@ -41,12 +41,12 @@ However, de-identification of non-structured data often involves statistical app Here we outline several de-identification solutions available as open source: -| Solution | Notes | -| -- | -- | -| [Presidio](https://microsoft.github.io/presidio) | Presidio helps to ensure sensitive data is properly managed and governed. It provides fast identification and anonymization modules for private entities in text such as credit card numbers, names, locations, social security numbers, bitcoin wallets, US phone numbers, financial data and more in unstructured text and images. It's useful when high customization is required, for example to detect custom PII entities or languages. [Link to repo](https://aka.ms/presidio), [link to docs](https://microsoft.github.io/presidio), [link to demo](https://aka.ms/presidio-demo). 
| -| [FHIR tools for anonymization](https://github.com/microsoft/FHIR-Tools-for-Anonymization) | FHIR Tools for Anonymization is an open-source project that helps anonymize healthcare FHIR data (FHIR=Fast Healthcare Interoperability Resources, a standard for exchanging Electric Health Records), on-premises or in the cloud, for secondary usage such as research, public health, and more. [Link](https://github.com/microsoft/FHIR-Tools-for-Anonymization). Works with FHIR format (Stu3 and R4), allows different strategies for anonymization (date shift, crypto-hash, encrypt, substitute, perturb, generalize) | -| [ARX](https://arx.deidentifier.org/) | Anonymization using statistical models, specifically k-anonymity, ℓ-diversity, t-closeness and δ-presence. Useful for validating the anonymization of aggregated data. Links: [Repo](https://github.com/arx-deidentifier/arx), [Website](https://arx.deidentifier.org/). Written in Java. | -| [k-Anonymity](https://github.com/Nuclearstar/K-Anonymity) | GitHub repo with examples on how to produce k-anonymous datasets. k-anonymity protects the privacy of individual persons by pooling their attributes into groups of at least *k* people. [repo](https://github.com/Nuclearstar/K-Anonymity/blob/master/k-Anonymity.ipynb) | +| Solution | Notes | +|-------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Presidio](https://microsoft.github.io/presidio) | Presidio helps to ensure sensitive data is properly managed and governed. It provides fast identification and anonymization modules for private entities in text such as credit card numbers, names, locations, social security numbers, bitcoin wallets, US phone numbers, financial data and more in unstructured text and images. It's useful when high customization is required, for example to detect custom PII entities or languages. [Link to repo](https://aka.ms/presidio), [link to docs](https://microsoft.github.io/presidio), [link to demo](https://aka.ms/presidio-demo). | +| [FHIR tools for anonymization](https://github.com/microsoft/FHIR-Tools-for-Anonymization) | FHIR Tools for Anonymization is an open-source project that helps anonymize healthcare FHIR data (FHIR=Fast Healthcare Interoperability Resources, a standard for exchanging Electric Health Records), on-premises or in the cloud, for secondary usage such as research, public health, and more. [Link](https://github.com/microsoft/FHIR-Tools-for-Anonymization). Works with FHIR format (Stu3 and R4), allows different strategies for anonymization (date shift, crypto-hash, encrypt, substitute, perturb, generalize) | +| [ARX](https://arx.deidentifier.org/) | Anonymization using statistical models, specifically k-anonymity, ℓ-diversity, t-closeness and δ-presence. Useful for validating the anonymization of aggregated data. Links: [Repo](https://github.com/arx-deidentifier/arx), [Website](https://arx.deidentifier.org/). Written in Java. 
| +| [k-Anonymity](https://github.com/Nuclearstar/K-Anonymity) | GitHub repo with examples on how to produce k-anonymous datasets. k-anonymity protects the privacy of individual persons by pooling their attributes into groups of at least *k* people. [repo](https://github.com/Nuclearstar/K-Anonymity/blob/master/k-Anonymity.ipynb) | #### Synthetic data generation @@ -59,14 +59,14 @@ When determining the best method for creating synthetic data, it is essential fi - Fully synthetic: This data does not contain any original data, which means that re-identification of any single unit is almost impossible, and all variables are still fully available. - Partially synthetic: Only sensitive data is replaced with synthetic data, which requires a heavy dependency on the imputation model. This leads to decreased model dependence but does mean that some disclosure is possible due to the actual values within the dataset. -| Solution | Notes | -| -- | -- | -| [Synthea](https://synthetichealth.github.io/synthea/) | Synthea was developed with numerous data sources collected on the internet, including US Census Bureau demographics, Centers for Disease Control and Prevention prevalence and incidence rates, and National Institutes of Health reports. The source code and disease models include annotations and citations for all data, statistics, and treatments. These models of diseases and treatments interact appropriately with the health record. | -| [PII dataset generator](https://github.com/microsoft/presidio-research/blob/master/presidio_evaluator/data_generator/README.md) | A synthetic data generator developed on top of Fake Name Generator which takes a text file with templates (e.g. my name is *PERSON*) and creates a list of Input Samples which contain fake PII entities instead of placeholders. | -| [CheckList](https://github.com/marcotcr/checklist) | CheckList provides a framework for perturbation techniques to evaluate specific behavioral capabilities of NLP models systematically | -| [Mimesis](https://github.com/lk-geimfari/mimesis) | Mimesis a high-performance fake data generator for Python, which provides data for a variety of purposes in a variety of languages. | -| [Faker](https://github.com/joke2k/faker) | Faker is a Python package that generates fake data for you. Whether you need to bootstrap your database, create good-looking XML documents, fill-in your persistence to stress test it, or anonymize data taken from a production service, Faker is for you. | -| [Plaitpy](https://github.com/plaitpy/plaitpy) | The idea behind plait.py is that it should be easy to model fake data that has an interesting shape. Currently, many fake data generators model their data as a collection of IID variables; with plait.py we can stitch together those variables into a more coherent model. 
| +| Solution | Notes | +|---------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Synthea](https://synthetichealth.github.io/synthea/) | Synthea was developed with numerous data sources collected on the internet, including US Census Bureau demographics, Centers for Disease Control and Prevention prevalence and incidence rates, and National Institutes of Health reports. The source code and disease models include annotations and citations for all data, statistics, and treatments. These models of diseases and treatments interact appropriately with the health record. | +| [PII dataset generator](https://github.com/microsoft/presidio-research/blob/master/presidio_evaluator/data_generator/README.md) | A synthetic data generator developed on top of Fake Name Generator which takes a text file with templates (e.g. my name is *PERSON*) and creates a list of Input Samples which contain fake PII entities instead of placeholders. | +| [CheckList](https://github.com/marcotcr/checklist) | CheckList provides a framework for perturbation techniques to evaluate specific behavioral capabilities of NLP models systematically | +| [Mimesis](https://github.com/lk-geimfari/mimesis) | Mimesis a high-performance fake data generator for Python, which provides data for a variety of purposes in a variety of languages. | +| [Faker](https://github.com/joke2k/faker) | Faker is a Python package that generates fake data for you. Whether you need to bootstrap your database, create good-looking XML documents, fill-in your persistence to stress test it, or anonymize data taken from a production service, Faker is for you. | +| [Plaitpy](https://github.com/plaitpy/plaitpy) | The idea behind plait.py is that it should be easy to model fake data that has an interesting shape. Currently, many fake data generators model their data as a collection of IID variables; with plait.py we can stitch together those variables into a more coherent model. | ### Trusted research and modeling environments @@ -81,10 +81,10 @@ and has access to one or more datasets provided by the data platform. We highlight several alternatives for Trusted Research Environments: -| Solution | Notes | -| -- | -- | -| [Azure Trusted Research Environment](https://github.com/microsoft/azuretre) | An Open Source TRE for Azure. | -| [Aridhia DRE](https://appsource.microsoft.com/en-us/product/web-apps/aridhiainformatics.analytixagility_workspace_123?tab=Overview) | | +| Solution | Notes | +|-------------------------------------------------------------------------------------------------------------------------------------|-------------------------------| +| [Azure Trusted Research Environment](https://github.com/microsoft/azuretre) | An Open Source TRE for Azure. 
| +| [Aridhia DRE](https://appsource.microsoft.com/en-us/product/web-apps/aridhiainformatics.analytixagility_workspace_123?tab=Overview) | | #### Eyes-off machine learning @@ -133,10 +133,10 @@ The result of the computation *F* is in an encrypted form, which on decrypting g Homomorphic Encryption frameworks: -| Solution | Notes | -| -- | -- | -| [Microsoft SEAL](https://www.microsoft.com/en-us/research/project/microsoft-seal) | Secure Cloud Storage and Computation, ML Modeling. A widely used open-source library from Microsoft that supports the BFV and the CKKS schemes. | -| [Palisade](https://palisade-crypto.org/) | A widely used open-source library from a consortium of DARPA-funded defense contractors that supports multiple homomorphic encryption schemes such as BGV, BFV, CKKS, TFHE and FHEW, among others, with multiparty support. [Link to repo](https://gitlab.com/palisade/palisade-release) | +| Solution | Notes | +|-----------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Microsoft SEAL](https://www.microsoft.com/en-us/research/project/microsoft-seal) | Secure Cloud Storage and Computation, ML Modeling. A widely used open-source library from Microsoft that supports the BFV and the CKKS schemes. | +| [Palisade](https://palisade-crypto.org/) | A widely used open-source library from a consortium of DARPA-funded defense contractors that supports multiple homomorphic encryption schemes such as BGV, BFV, CKKS, TFHE and FHEW, among others, with multiparty support. [Link to repo](https://gitlab.com/palisade/palisade-release) | | [PySift](https://github.com/OpenMined/PySyft) | Private deep learning. PySyft decouples private data from model training, using Federated Learning, Differential Privacy, and Encrypted Computation (like Multi-Party Computation (MPC) and Homomorphic Encryption (HE)) within the main Deep Learning frameworks like PyTorch and TensorFlow. A list of additional OSS tools can be found [here](https://homomorphicencryption.org/introduction/). @@ -148,10 +148,10 @@ Instead of sending data to the processing engine of the model, the approach is t Federated learning frameworks: -| Solution | Notes | -| -- | -- | -| [TensorFlow Federated Learning](https://github.com/tensorflow/federated) | OSS federated learning system built on top of TensorFlow | -| [FATE](https://fate.fedai.org/) | An OSS federated learning system with different options for deployment and different algorithms adapted for federated learning | +| Solution | Notes | +|--------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| +| [TensorFlow Federated Learning](https://github.com/tensorflow/federated) | OSS federated learning system built on top of TensorFlow | +| [FATE](https://fate.fedai.org/) | An OSS federated learning system with different options for deployment and different algorithms adapted for federated learning | | [IBM Federated Learning](https://github.com/IBM/federated-learning-lib) | A Python based federated learning framework focused on enterprise environments. 
| ### Data loss prevention @@ -176,12 +176,12 @@ There are typically four levels data classification levels: Tools for data classification on Azure: -| Solution | Notes | -| -- | -- | -| [Microsoft Information Protection](https://learn.microsoft.com/en-us/microsoft-365/compliance/information-protection) (MIP) | A suite for DLP, sensitive data classification, cataloging and more. | -| [Azure Purview](https://azure.microsoft.com/en-us/services/purview/) | A unified data governance service, which includes the classification and cataloging of sensitive data. Azure Purview leverages the MIP technology for data classification and more. | -| [Data Discovery & Classification for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse](https://learn.microsoft.com/en-us/azure/azure-sql/database/data-discovery-and-classification-overview) | Basic capabilities for discovering, classifying, labeling, and reporting the sensitive data in Azure SQL and Synapse databases. | -| [Data Discovery & Classification for SQL Server](https://learn.microsoft.com/en-us/sql/relational-databases/security/sql-data-discovery-and-classification?view=sql-server-ver15&tabs=t-sql) | Capabilities for discovering, classifying, labeling & reporting the sensitive data in SQL Server databases. | +| Solution | Notes | +|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Microsoft Information Protection](https://learn.microsoft.com/en-us/microsoft-365/compliance/information-protection) (MIP) | A suite for DLP, sensitive data classification, cataloging and more. | +| [Azure Purview](https://azure.microsoft.com/en-us/services/purview/) | A unified data governance service, which includes the classification and cataloging of sensitive data. Azure Purview leverages the MIP technology for data classification and more. | +| [Data Discovery & Classification for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse](https://learn.microsoft.com/en-us/azure/azure-sql/database/data-discovery-and-classification-overview) | Basic capabilities for discovering, classifying, labeling, and reporting the sensitive data in Azure SQL and Synapse databases. | +| [Data Discovery & Classification for SQL Server](https://learn.microsoft.com/en-us/sql/relational-databases/security/sql-data-discovery-and-classification?view=sql-server-ver15&tabs=t-sql) | Capabilities for discovering, classifying, labeling & reporting the sensitive data in SQL Server databases. | Often, tools used for de-identification can also serve as sensitive data classifiers. Refer to [de-identification tools](#data-de-identification) for such tools. diff --git a/docs/user-interface-engineering/README.md b/docs/user-interface-engineering/README.md index d091b88f2f..73862d6af6 100644 --- a/docs/user-interface-engineering/README.md +++ b/docs/user-interface-engineering/README.md @@ -50,7 +50,7 @@ The benefit of building software applications is that there are truly infinite w - Rarely will you have to support legacy browsers; thus, you can rely on modern JavaScript language features! 
No need for build tools or even TypeScript (did you know you can [type check JavaScript](https://www.typescriptlang.org/docs/handbook/intro-to-js-ts.html)?).
1. Web Component frameworks
   - Web Components are now standardized in all modern browsers
-   - Microsoft has their own, stable & actively-maintained framework, [Fast](https://fast.design)
+   - Microsoft has their own, stable & actively-maintained framework, [Fast](https://www.fast.design/)

For more information on choosing the right implementation tool, read the [Recommended Technologies](./recommended-technologies.md) document.
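As a brief illustration of the "type check JavaScript" point above, here is a minimal sketch (the file contents and names are hypothetical, not part of this change) showing how the TypeScript checker, for example in VS Code, can validate a plain `.js` file via `// @ts-check` and JSDoc annotations, with no build step required:

```js
// @ts-check
// With @ts-check enabled, the TypeScript checker validates this plain JavaScript file.

/**
 * Builds a display label for a card component.
 * @param {string} title
 * @param {number} count
 * @returns {string}
 */
function buildLabel(title, count) {
  return `${title} (${count})`;
}

buildLabel("Open issues", 12);        // OK
// buildLabel("Open issues", "12");   // would be flagged: string is not assignable to number
```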