diff --git a/.github/workflows/ai.yaml b/.github/workflows/ai.yaml deleted file mode 100644 index 1891f77d5..000000000 --- a/.github/workflows/ai.yaml +++ /dev/null @@ -1,78 +0,0 @@ -name: Plural AI -on: - push: - branches: - - master - - "renovate/frontend/*" - paths: - - ".github/workflows/ai.yaml" - - "ai/**" - pull_request: - branches: - - "**" - paths: - - ".github/workflows/ai.yaml" - - "ai/**" -jobs: - build: - name: Build image - runs-on: ubuntu-20.04 - permissions: - contents: 'read' - id-token: 'write' - packages: 'write' - security-events: write - actions: read - steps: - - uses: actions/checkout@v3 - - name: Docker meta - id: meta - uses: docker/metadata-action@v4 - with: - # list of Docker images to use as base name for tags - images: | - ghcr.io/pluralsh/plural-ai - # generate Docker tags based on the following events/attributes - tags: | - type=sha - type=ref,event=pr - type=ref,event=branch - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Login to GHCR - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - uses: docker/build-push-action@v3 - with: - context: ./ai - file: ./ai/Dockerfile - push: true - load: false - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64 - cache-from: type=gha - cache-to: type=gha,mode=max - - name: Run Trivy vulnerability scanner on frontend image - uses: aquasecurity/trivy-action@master - with: - scan-type: 'image' - image-ref: ${{ fromJSON(steps.meta.outputs.json).tags[0] }} - hide-progress: false - format: 'sarif' - output: 'trivy-results.sarif' - security-checks: 'vuln,secret' - ignore-unfixed: true - #severity: 'CRITICAL,HIGH' - # env: - # TRIVY_SKIP_DB_UPDATE: true - # TRIVY_SKIP_JAVA_DB_UPDATE: true - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: 'trivy-results.sarif' \ No newline at end of file diff --git a/ai/.dockerignore b/ai/.dockerignore deleted file mode 100644 index ac9ab8c21..000000000 --- a/ai/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -Dockerfile -.gitattributes -.gitignore \ No newline at end of file diff --git a/ai/.gitattributes b/ai/.gitattributes deleted file mode 100644 index dfe077042..000000000 --- a/ai/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -# Auto detect text files and perform LF normalization -* text=auto diff --git a/ai/.gitignore b/ai/.gitignore deleted file mode 100644 index 4886616fc..000000000 --- a/ai/.gitignore +++ /dev/null @@ -1,161 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so -renv - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ diff --git a/ai/Dockerfile b/ai/Dockerfile deleted file mode 100644 index d42f79228..000000000 --- a/ai/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM python:3.9-slim - -WORKDIR /code - -COPY . . 
- -RUN pip install -r requirements.txt - -EXPOSE 8000 - -CMD uvicorn main:app --host 0.0.0.0 --port 8000 \ No newline at end of file diff --git a/ai/Makefile b/ai/Makefile deleted file mode 100644 index b6a5fe8eb..000000000 --- a/ai/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -.PHONY: help - -help: - @perl -nle'print $& if m{^[a-zA-Z_-]+:.*?## .*$$}' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -install: ## installs dependencies - pip3 install -r requirements.txt - -serve: ## starts the server - uvicorn main:app --reload - -scrape: ## attempts to compile a local graph store from various datasources - python3 scraper.py \ No newline at end of file diff --git a/ai/README.md b/ai/README.md deleted file mode 100644 index ea44a7326..000000000 --- a/ai/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# pluralai -This uses the python llama index project to manage most of the embedding and storage, which has plugins for most of the stuff we need. There is both a scraper and a webserver. Run instructions are available for both. - -To do either, you'll need to install `requirements.txt` with: - -```sh -pip install -r requirements.txt -``` - -## Running the webserver -The server is built with FastAPI. Start the server by running `uvicorn main:app --reload` - -Swagger Documentation: /docs -Chat endpoint: /chat - -## Running scraper.py - -The scraper currently incorporates three datasources: - -* the sitemap from docs.plural.sh -* the app specific docs from the plural gql api -* discord message history - -You'll need an OpenAI API key to generate embeddings, and to scrape discord (optional) you'll also need a discord token. Export both with: - -```sh -export OPENAI_API_KEY=an-openai-key -export DISCORD_TOKEN=a-discord-token -``` - -You should then be able to run: - -```sh -./scraper.py -``` diff --git a/ai/main.py b/ai/main.py deleted file mode 100644 index 05043562e..000000000 --- a/ai/main.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import openai -from fastapi import FastAPI, HTTPException -from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context -from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer -from llama_index.embeddings import OpenAIEmbedding - -from pydantic import BaseModel - -openai.api_key = os.environ["OPENAI_API_KEY"] - -app = FastAPI() - -class QueryRequest(BaseModel): - question: str - -class QueryResponse(BaseModel): - answer: str - - -embed_model = OpenAIEmbedding(embed_batch_size=10) -service_context = ServiceContext.from_defaults(embed_model=embed_model) -set_global_service_context(service_context) - -storage_context = StorageContext.from_defaults(persist_dir="./storage") -index = load_index_from_storage(storage_context) -query_engine = index.as_query_engine( - node_postprocessors=[SentenceEmbeddingOptimizer(percentile_cutoff=0.5)], - response_mode="compact", - similarity_cutoff=0.7 -) - -@app.get("/") -def read_root(): - return {"Hello": "World"} - -@app.post("/chat") -def query_data(request: QueryRequest): - response = query_engine.query(request.question) - if not response: - raise HTTPException(status_code=404, detail="No results found") - - return QueryResponse(answer=str(response)) diff --git a/ai/requirements.txt b/ai/requirements.txt deleted file mode 100644 index e6b83a084..000000000 --- a/ai/requirements.txt +++ /dev/null @@ -1,59 +0,0 @@ -aiohttp==3.9.0 -aiosignal==1.3.1 -anyio==3.7.1 -async-timeout==4.0.2 -attrs==23.1.0 -beautifulsoup4==4.12.2 
-certifi==2023.7.22 -charset-normalizer==3.2.0 -click==8.1.4 -dataclasses-json==0.5.9 -exceptiongroup==1.1.2 -fastapi==0.100.0 -frozenlist==1.3.3 -fsspec==2023.6.0 -h11==0.14.0 -httptools==0.6.0 -idna==3.4 -langchain==0.0.329 -langchainplus-sdk==0.0.20 -llama-index==0.7.4 -marshmallow==3.19.0 -marshmallow-enum==1.5.1 -multidict==6.0.4 -mypy-extensions==1.0.0 -nest-asyncio==1.5.6 -numexpr==2.8.4 -numpy==1.24.4 -openai==0.27.8 -openapi-schema-pydantic==1.2.4 -packaging==23.1 -pandas==2.0.3 -pydantic==1.10.11 -python-dateutil==2.8.2 -python-dotenv==1.0.0 -pytz==2023.3 -PyYAML==6.0 -regex==2023.6.3 -requests==2.31.0 -six==1.16.0 -sniffio==1.3.0 -soupsieve==2.4.1 -SQLAlchemy==2.0.18 -starlette==0.27.0 -tenacity==8.2.2 -tiktoken==0.4.0 -tqdm==4.65.0 -typing-inspect==0.8.0 -typing_extensions==4.5.0 -tzdata==2023.3 -urllib3==1.26.18 -uvicorn==0.22.0 -uvloop==0.17.0 -watchfiles==0.19.0 -websockets==11.0.3 -yarl==1.9.2 -python-graphql-client -nltk -config -html2text \ No newline at end of file diff --git a/ai/scraped_data.json b/ai/scraped_data.json deleted file mode 100644 index cf77dcd5c..000000000 --- a/ai/scraped_data.json +++ /dev/null @@ -1,2179 +0,0 @@ -[ - { - "page_link": "https://github.com/airbytehq/airbyte", - "title": "airbyte readme", - "text": "

Airbyte\n\nData integration platform for ELT pipelines from APIs, databases & files to databases, warehouses & lakes\n
\n\nWe believe that only an **open-source** solution to data movement can cover the **long tail of data sources** while empowering data engineers to **customize existing connectors**. Our ultimate vision is to help you move data from any source to any destination. Airbyte already provides [300+ connectors](https://docs.airbyte.com/integrations/) for popular APIs, databases, data warehouses, and data lakes.\n\nYou can implement Airbyte connectors in any language and take the form of a Docker image that follows the [Airbyte specification](https://docs.airbyte.com/understanding-airbyte/airbyte-protocol/). You can create new connectors very fast with:\n\n- The [low-code Connector Development Kit](https://docs.airbyte.com/connector-development/config-based/low-code-cdk-overview) (CDK) for API connectors ([demo](https://www.youtube.com/watch?v=i7VSL2bDvmw))\n- The [Python CDK](https://docs.airbyte.com/connector-development/cdk-python/) ([tutorial](https://docs.airbyte.com/connector-development/tutorials/cdk-speedrun))\n\nAirbyte has a built-in scheduler and uses [Temporal](https://airbyte.com/blog/scale-workflow-orchestration-with-temporal) to orchestrate jobs and ensure reliability at scale. Airbyte leverages [dbt](https://www.youtube.com/watch?v=saXwh6SpeHA) to normalize extracted data and can trigger custom transformations in SQL and dbt. You can also orchestrate Airbyte syncs with [Airflow](https://docs.airbyte.com/operator-guides/using-the-airflow-airbyte-operator), [Prefect](https://docs.airbyte.com/operator-guides/using-prefect-task), or [Dagster](https://docs.airbyte.com/operator-guides/using-dagster-integration).\n\n![Airbyte OSS Connections UI](https://user-images.githubusercontent.com/2302748/205949986-5207ca24-f1f0-41b1-97e1-a0745a0de55a.png)\n\nExplore our [demo app](https://demo.airbyte.io/).\n\n## Quick start\n\n### Run Airbyte locally\n\nYou can run Airbyte locally with Docker. The shell script below will retrieve the requisite docker files from the [platform repository](https://github.com/airbytehq/airbyte-platform) and run docker compose for you.\n\n```bash\ngit clone --depth 1 https://github.com/airbytehq/airbyte.git\ncd airbyte\n./run-ab-platform.sh\n```\n\nLogin to the web app at [http://localhost:8000](http://localhost:8000) by entering the default credentials found in your .env file.\n\n```\nBASIC_AUTH_USERNAME=airbyte\nBASIC_AUTH_PASSWORD=password\n```\n\nFollow web app UI instructions to set up a source, destination, and connection to replicate data. 
Connections support the most popular sync modes: full refresh, incremental and change data capture for databases.\n\nRead the [Airbyte docs](https://docs.airbyte.com).\n\n### Manage Airbyte configurations with code\n\nYou can also programmatically manage sources, destinations, and connections with YAML files, [Octavia CLI](https://github.com/airbytehq/airbyte/tree/master/octavia-cli), and API.\n\n### Deploy Airbyte to production\n\nDeployment options: [Docker](https://docs.airbyte.com/deploying-airbyte/local-deployment), [AWS EC2](https://docs.airbyte.com/deploying-airbyte/on-aws-ec2), [Azure](https://docs.airbyte.com/deploying-airbyte/on-azure-vm-cloud-shell), [GCP](https://docs.airbyte.com/deploying-airbyte/on-gcp-compute-engine), [Kubernetes](https://docs.airbyte.com/deploying-airbyte/on-kubernetes), [Restack](https://docs.airbyte.com/deploying-airbyte/on-restack), [Plural](https://docs.airbyte.com/deploying-airbyte/on-plural), [Oracle Cloud](https://docs.airbyte.com/deploying-airbyte/on-oci-vm), [Digital Ocean](https://docs.airbyte.com/deploying-airbyte/on-digitalocean-droplet)...\n\n### Use Airbyte Cloud\n\nAirbyte Cloud is the fastest and most reliable way to run Airbyte. It is a cloud-based data integration platform that allows you to collect and consolidate data from various sources into a single, unified system. It provides a user-friendly interface for data integration, transformation, and migration.\n\nWith Airbyte Cloud, you can easily connect to various data sources such as databases, APIs, and SaaS applications. It also supports a wide range of popular data sources like Salesforce, Stripe, Hubspot, PostgreSQL, and MySQL, among others.\n\nAirbyte Cloud provides a scalable and secure platform for data integration, making it easier for users to move, transform, and replicate data across different applications and systems. It also offers features like monitoring, alerting, and scheduling to ensure data quality and reliability.\n\nSign up for [Airbyte Cloud](https://cloud.airbyte.io/signup) and get free credits in minutes.\n\n## Contributing\n\nGet started by checking Github issues and creating a Pull Request. An easy way to start contributing is to update an existing connector or create a new connector using the low-code and Python CDKs. You can find the code for existing connectors in the [connectors](https://github.com/airbytehq/airbyte/tree/master/airbyte-integrations/connectors) directory. The Airbyte platform is written in Java, and the frontend in React. You can also contribute to our docs and tutorials. Advanced Airbyte users can apply to the [Maintainer program](https://airbyte.com/maintainer-program) and [Writer Program](https://airbyte.com/write-for-the-community).\n\nIf you would like to make a contribution to the platform itself, please refer to guides in [the platform repository](https://github.com/airbytehq/airbyte-platform).\n\nRead the [Contributing guide](https://docs.airbyte.com/contributing-to-airbyte/).\n\n## Reporting vulnerabilities\n\n\u26a0\ufe0f Please do not file GitHub issues or post on our public forum for security vulnerabilities, as they are public! \u26a0\ufe0f\n\nAirbyte takes security issues very seriously. If you have any concerns about Airbyte or believe you have uncovered a vulnerability, please get in touch via the e-mail address security@airbyte.io. In the message, try to provide a description of the issue and ideally a way of reproducing it. 
The security team will get back to you as soon as possible.\n\nNote that this security address should be used only for undisclosed vulnerabilities. Dealing with fixed issues or general questions on how to use the security features should be handled regularly via the user and the dev lists. Please report any security problems to us before disclosing it publicly.\n\n## License\n\nSee the [LICENSE](docs/project-overview/licenses/) file for licensing information, and our [FAQ](docs/project-overview/licenses/license-faq.md) for any questions you may have on that topic.\n\n## Resources\n\n- [Connectors Registry Report](https://connectors.airbyte.com/files/generated_reports/connector_registry_report.html) for a list of connectors available in Airbyte and Airbyte Cloud\n- [Weekly office hours](https://airbyte.io/weekly-office-hours/) for live informal sessions with the Airbyte team\n- [Slack](https://slack.airbyte.io) for quick discussion with the Community and Airbyte team\n- [Discourse](https://discuss.airbyte.io/) for deeper conversations about features, connectors, and problems\n- [GitHub](https://github.com/airbytehq/airbyte) for code, issues and pull requests\n- [Youtube](https://www.youtube.com/c/AirbyteHQ) for videos on data engineering\n- [Newsletter](https://airbyte.com/newsletter) for product updates and data news\n- [Blog](https://airbyte.com/blog) for data insights articles, tutorials and updates\n- [Docs](https://docs.airbyte.com/) for Airbyte features\n- [Roadmap](https://app.harvestr.io/roadmap/view/pQU6gdCyc/launch-week-roadmap) for planned features\n\n## Thank You\n\nAirbyte would not be possible without the support and assistance of other open-source tools and companies. Visit our [thank you page](THANK-YOU.md) to lear more about how we build Airbyte.\n\n", - "source_links": [], - "id": 0 - }, - { - "page_link": "basic-auth.md", - "title": "basic-auth", - "text": "## Configuring Basic Auth\n\nAirbyte's api and web interface is not authenticated by default. We provide an oauth proxy by default to grant some security to your airbyte install, but in order to integrate with tools like airflow, you'll likely want a means to authenticate with static creds. That's where basic auth can be very useful. The process is very simple.\n\n### modify context.yaml\n\nin the `context.yaml` file at the root of your repo, simply add:\n\n```yaml\nconfiguration:\n airbyte:\n users:\n : \n : \n```\nyou can use `plural crypto random` to generate a high-entropy password if that is helpful as well.\n\n### redeploy\n\nSimply run `plural build --only airbyte && plural deploy --commit \"enabling basic auth\"` to wire in the credentials to our oauth proxy. Occasionally you need to restart the web pods to get it to take, you can find them with:\n\n```sh\nkubectl get pods -n airbyte | grep airbyte-web\n```\n\nthen delete them (allowing k8s to restart) with:\n\n```sh\nkubectl delete pod -n airbyte\n```", - "source_links": [], - "id": 1 - }, - { - "page_link": "bring-your-own-db.md", - "title": "bring-your-own-db", - "text": "## Connecting to a managed SQL\u00a0instance\n\nWe ship airbyte with the zalando postgres operator's db for persistence by default. This provides a lot of the benefits of a managed postgres instance at a lower cost, but if you'd rather use a familiar service like RDS this is still possible. 
You'll need to do a few things:\n\n### edit context.yaml\n\nAt the root of the repo, edit the `context.yaml` field and set `configuration.airbyte.postgresDisabled: true`, this will allow us to reconfigure airbyte for bring-your-own-db.\n\n### save the database password to a secret\n\nyou can use a number of methods for this, but simply adding a secret file as `airbyte/helm/airbyte/templates/db-password.yaml` like:\n\n```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: airbyte-db-password\nstringData:\n password: {{ .Values.externalDb.password }}\n```\n\nNote: this password needs to be in the `airbyte` namespace. If you put it in our wrapper helm chart, that will be done by default for you.\n\n### modify airbyte's helm values.yaml \n\nIf you go to `airbyte/helm/airbyte/values.yaml` you'll need to provide credentials for postgres. They should look something like:\n\n```yaml\nexternalDb:\n password: \nglobal:\n database:\n secretName: airbyte-db-password\n secretValue: password\nairbyte:\n airbyte:\n externalDatabase:\n database: \n host: \n user: \n port: 5432\n```\n\n(we're ultimately beholden to the structure defined in airbyte's upstream helm chart here)\n\n### redeploy\n\nFrom there, you should be able to run `plural build --only airbyte && plural deploy --commit \"using existing postgres instance\"` to use the managed sql instance", - "source_links": [], - "id": 2 - }, - { - "page_link": "troubleshooting.md", - "title": "troubleshooting", - "text": "# Troubleshooting Guide\n\nThis is a running doc of things that could potentially surface in your airbyte instance that can be easily addressed. You'll find most of these errors in the logs for airbyte but they can surface elsewhere as well\n\n### Failure executing: POST at: https://172.20.0.1/api/v1/namespaces/airbyte/pods. Message: Unauthorized! Configured service account doesn't have access. Service account may have been revoked. Unauthorized.\n\nIt's unclear exactly what causes this, but it's likely a bug in airbyte's kubernetes client implementation. There's a spot-fix for this, simply delete the airbyte-worker pods in your instance and allow k8s to respawn them. 
That will regenerate the service account token and allow airbyte to continue as normal.", - "source_links": [], - "id": 3 - }, - { - "page_link": "https://github.com/apache/airflow", - "title": "airflow readme", - "text": "\n\n# Apache Airflow\n\n[![PyPI version](https://badge.fury.io/py/apache-airflow.svg)](https://badge.fury.io/py/apache-airflow)\n[![GitHub Build](https://github.com/apache/airflow/workflows/CI%20Build/badge.svg)](https://github.com/apache/airflow/actions)\n[![Coverage Status](https://codecov.io/github/apache/airflow/coverage.svg?branch=main)](https://app.codecov.io/gh/apache/airflow/branch/main)\n[![License](https://img.shields.io/:license-Apache%202-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0.txt)\n[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/apache-airflow.svg)](https://pypi.org/project/apache-airflow/)\n[![Docker Pulls](https://img.shields.io/docker/pulls/apache/airflow.svg)](https://hub.docker.com/r/apache/airflow)\n[![Docker Stars](https://img.shields.io/docker/stars/apache/airflow.svg)](https://hub.docker.com/r/apache/airflow)\n[![PyPI - Downloads](https://img.shields.io/pypi/dm/apache-airflow)](https://pypi.org/project/apache-airflow/)\n[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/apache-airflow)](https://artifacthub.io/packages/search?repo=apache-airflow)\n[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)\n[![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAirflow.svg?style=social&label=Follow)](https://twitter.com/ApacheAirflow)\n[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://s.apache.org/airflow-slack)\n[![Contributors](https://img.shields.io/github/contributors/apache/airflow)](https://github.com/apache/airflow/graphs/contributors)\n[![OSSRank](https://shields.io/endpoint?url=https://ossrank.com/shield/6)](https://ossrank.com/p/6)\n\n[Apache Airflow](https://airflow.apache.org/docs/apache-airflow/stable/) (or simply Airflow) is a platform to programmatically author, schedule, and monitor workflows.\n\nWhen workflows are defined as code, they become more maintainable, versionable, testable, and collaborative.\n\nUse Airflow to author workflows as directed acyclic graphs (DAGs) of tasks. The Airflow scheduler executes your tasks on an array of workers while following the specified dependencies. Rich command line utilities make performing complex surgeries on DAGs a snap. 
The rich user interface makes it easy to visualize pipelines running in production, monitor progress, and troubleshoot issues when needed.\n\n\n\n**Table of contents**\n\n- [Project Focus](#project-focus)\n- [Principles](#principles)\n- [Requirements](#requirements)\n- [Getting started](#getting-started)\n- [Installing from PyPI](#installing-from-pypi)\n- [Official source code](#official-source-code)\n- [Convenience packages](#convenience-packages)\n- [User Interface](#user-interface)\n- [Semantic versioning](#semantic-versioning)\n- [Version Life Cycle](#version-life-cycle)\n- [Support for Python and Kubernetes versions](#support-for-python-and-kubernetes-versions)\n- [Base OS support for reference Airflow images](#base-os-support-for-reference-airflow-images)\n- [Approach to dependencies of Airflow](#approach-to-dependencies-of-airflow)\n- [Contributing](#contributing)\n- [Who uses Apache Airflow?](#who-uses-apache-airflow)\n- [Who Maintains Apache Airflow?](#who-maintains-apache-airflow)\n- [Can I use the Apache Airflow logo in my presentation?](#can-i-use-the-apache-airflow-logo-in-my-presentation)\n- [Airflow merchandise](#airflow-merchandise)\n- [Links](#links)\n- [Sponsors](#sponsors)\n\n\n\n## Project Focus\n\nAirflow works best with workflows that are mostly static and slowly changing. When the DAG structure is similar from one run to the next, it clarifies the unit of work and continuity. Other similar projects include [Luigi](https://github.com/spotify/luigi), [Oozie](https://oozie.apache.org/) and [Azkaban](https://azkaban.github.io/).\n\nAirflow is commonly used to process data, but has the opinion that tasks should ideally be idempotent (i.e., results of the task will be the same, and will not create duplicated data in a destination system), and should not pass large quantities of data from one task to the next (though tasks can pass metadata using Airflow's [XCom feature](https://airflow.apache.org/docs/apache-airflow/stable/concepts/xcoms.html)). For high-volume, data-intensive tasks, a best practice is to delegate to external services specializing in that type of work.\n\nAirflow is not a streaming solution, but it is often used to process real-time data, pulling data off streams in batches.\n\n## Principles\n\n- **Dynamic**: Airflow pipelines are configuration as code (Python), allowing for dynamic pipeline generation. This allows for writing code that instantiates pipelines dynamically.\n- **Extensible**: Easily define your own operators, executors and extend the library so that it fits the level of abstraction that suits your environment.\n- **Elegant**: Airflow pipelines are lean and explicit. 
Parameterizing your scripts is built into the core of Airflow using the powerful **Jinja** templating engine.\n- **Scalable**: Airflow has a modular architecture and uses a message queue to orchestrate an arbitrary number of workers.\n\n## Requirements\n\nApache Airflow is tested with:\n\n| | Main version (dev) | Stable version (2.6.0) |\n|---------------------|------------------------------|------------------------------|\n| Python | 3.7, 3.8, 3.9, 3.10 | 3.7, 3.8, 3.9, 3.10 |\n| Platform | AMD64/ARM64(\\*) | AMD64/ARM64(\\*) |\n| Kubernetes | 1.23, 1.24, 1.25, 1.26 | 1.21, 1.22, 1.23, 1.24, 1.25 |\n| PostgreSQL | 11, 12, 13, 14, 15 | 11, 12, 13, 14, 15 |\n| MySQL | 5.7, 8 | 5.7, 8 |\n| SQLite | 3.15.0+ | 3.15.0+ |\n| MSSQL | 2017(\\*), 2019(\\*) | 2017(\\*), 2019(\\*) |\n\n\\* Experimental\n\n**Note**: MySQL 5.x versions are unable to or have limitations with\nrunning multiple schedulers -- please see the [Scheduler docs](https://airflow.apache.org/docs/apache-airflow/stable/scheduler.html).\nMariaDB is not tested/recommended.\n\n**Note**: SQLite is used in Airflow tests. Do not use it in production. We recommend\nusing the latest stable version of SQLite for local development.\n\n**Note**: Airflow currently can be run on POSIX-compliant Operating Systems. For development it is regularly\ntested on fairly modern Linux Distros and recent versions of MacOS.\nOn Windows you can run it via WSL2 (Windows Subsystem for Linux 2) or via Linux Containers.\nThe work to add Windows support is tracked via [#10388](https://github.com/apache/airflow/issues/10388) but\nit is not a high priority. You should only use Linux-based distros as \"Production\" execution environment\nas this is the only environment that is supported. The only distro that is used in our CI tests and that\nis used in the [Community managed DockerHub image](https://hub.docker.com/p/apache/airflow) is\n`Debian Bullseye`.\n\n## Getting started\n\nVisit the official Airflow website documentation (latest **stable** release) for help with\n[installing Airflow](https://airflow.apache.org/docs/apache-airflow/stable/installation.html),\n[getting started](https://airflow.apache.org/docs/apache-airflow/stable/start.html), or walking\nthrough a more complete [tutorial](https://airflow.apache.org/docs/apache-airflow/stable/tutorial.html).\n\n> Note: If you're looking for documentation for the main branch (latest development branch): you can find it on [s.apache.org/airflow-docs](https://s.apache.org/airflow-docs/).\n\nFor more information on Airflow Improvement Proposals (AIPs), visit\nthe [Airflow Wiki](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvement+Proposals).\n\nDocumentation for dependent projects like provider packages, Docker image, Helm Chart, you'll find it in [the documentation index](https://airflow.apache.org/docs/).\n\n## Installing from PyPI\n\nWe publish Apache Airflow as `apache-airflow` package in PyPI. Installing it however might be sometimes tricky\nbecause Airflow is a bit of both a library and application. Libraries usually keep their dependencies open, and\napplications usually pin them, but we should do neither and both simultaneously. We decided to keep\nour dependencies as open as possible (in `setup.py`) so users can install different versions of libraries\nif needed. 
This means that `pip install apache-airflow` will not work from time to time or will\nproduce unusable Airflow installation.\n\nTo have repeatable installation, however, we keep a set of \"known-to-be-working\" constraint\nfiles in the orphan `constraints-main` and `constraints-2-0` branches. We keep those \"known-to-be-working\"\nconstraints files separately per major/minor Python version.\nYou can use them as constraint files when installing Airflow from PyPI. Note that you have to specify\ncorrect Airflow tag/version/branch and Python versions in the URL.\n\n\n1. Installing just Airflow:\n\n> Note: Only `pip` installation is currently officially supported.\n\nWhile it is possible to install Airflow with tools like [Poetry](https://python-poetry.org) or\n[pip-tools](https://pypi.org/project/pip-tools), they do not share the same workflow as\n`pip` - especially when it comes to constraint vs. requirements management.\nInstalling via `Poetry` or `pip-tools` is not currently supported.\n\nIf you wish to install Airflow using those tools, you should use the constraint files and convert\nthem to the appropriate format and workflow that your tool requires.\n\n\n```bash\npip install 'apache-airflow==2.6.0' \\\n --constraint \"https://raw.githubusercontent.com/apache/airflow/constraints-2.6.0/constraints-3.7.txt\"\n```\n\n2. Installing with extras (i.e., postgres, google)\n\n```bash\npip install 'apache-airflow[postgres,google]==2.6.0' \\\n --constraint \"https://raw.githubusercontent.com/apache/airflow/constraints-2.6.0/constraints-3.7.txt\"\n```\n\nFor information on installing provider packages, check\n[providers](http://airflow.apache.org/docs/apache-airflow-providers/index.html).\n\n## Official source code\n\nApache Airflow is an [Apache Software Foundation](https://www.apache.org) (ASF) project,\nand our official source code releases:\n\n- Follow the [ASF Release Policy](https://www.apache.org/legal/release-policy.html)\n- Can be downloaded from [the ASF Distribution Directory](https://downloads.apache.org/airflow)\n- Are cryptographically signed by the release manager\n- Are officially voted on by the PMC members during the\n [Release Approval Process](https://www.apache.org/legal/release-policy.html#release-approval)\n\nFollowing the ASF rules, the source packages released must be sufficient for a user to build and test the\nrelease provided they have access to the appropriate platform and tools.\n\n## Convenience packages\n\nThere are other ways of installing and using Airflow. Those are \"convenience\" methods - they are\nnot \"official releases\" as stated by the `ASF Release Policy`, but they can be used by the users\nwho do not want to build the software themselves.\n\nThose are - in the order of most common ways people install Airflow:\n\n- [PyPI releases](https://pypi.org/project/apache-airflow/) to install Airflow using standard `pip` tool\n- [Docker Images](https://hub.docker.com/r/apache/airflow) to install airflow via\n `docker` tool, use them in Kubernetes, Helm Charts, `docker-compose`, `docker swarm`, etc. 
You can\n read more about using, customising, and extending the images in the\n [Latest docs](https://airflow.apache.org/docs/docker-stack/index.html), and\n learn details on the internals in the [IMAGES.rst](https://github.com/apache/airflow/blob/main/IMAGES.rst) document.\n- [Tags in GitHub](https://github.com/apache/airflow/tags) to retrieve the git project sources that\n were used to generate official source packages via git\n\nAll those artifacts are not official releases, but they are prepared using officially released sources.\nSome of those artifacts are \"development\" or \"pre-release\" ones, and they are clearly marked as such\nfollowing the ASF Policy.\n\n## User Interface\n\n- **DAGs**: Overview of all DAGs in your environment.\n\n ![DAGs](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/img/dags.png)\n\n- **Grid**: Grid representation of a DAG that spans across time.\n\n ![Grid](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/img/grid.png)\n\n- **Graph**: Visualization of a DAG's dependencies and their current status for a specific run.\n\n ![Graph](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/img/graph.png)\n\n- **Task Duration**: Total time spent on different tasks over time.\n\n ![Task Duration](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/img/duration.png)\n\n- **Gantt**: Duration and overlap of a DAG.\n\n ![Gantt](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/img/gantt.png)\n\n- **Code**: Quick way to view source code of a DAG.\n\n ![Code](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/img/code.png)\n\n## Semantic versioning\n\nAs of Airflow 2.0.0, we support a strict [SemVer](https://semver.org/) approach for all packages released.\n\nThere are few specific rules that we agreed to that define details of versioning of the different\npackages:\n\n* **Airflow**: SemVer rules apply to core airflow only (excludes any changes to providers).\n Changing limits for versions of Airflow dependencies is not a breaking change on its own.\n* **Airflow Providers**: SemVer rules apply to changes in the particular provider's code only.\n SemVer MAJOR and MINOR versions for the packages are independent of the Airflow version.\n For example, `google 4.1.0` and `amazon 3.0.3` providers can happily be installed\n with `Airflow 2.1.2`. If there are limits of cross-dependencies between providers and Airflow packages,\n they are present in providers as `install_requires` limitations. We aim to keep backwards\n compatibility of providers with all previously released Airflow 2 versions but\n there will sometimes be breaking changes that might make some, or all\n providers, have minimum Airflow version specified. Change of that minimum supported Airflow version\n is a breaking change for provider because installing the new provider might automatically\n upgrade Airflow (which might be an undesired side effect of upgrading provider).\n* **Airflow Helm Chart**: SemVer rules apply to changes in the chart only. SemVer MAJOR and MINOR\n versions for the chart are independent from the Airflow version. We aim to keep backwards\n compatibility of the Helm Chart with all released Airflow 2 versions, but some new features might\n only work starting from specific Airflow releases. 
We might however limit the Helm\n Chart to depend on minimal Airflow version.\n* **Airflow API clients**: SemVer MAJOR and MINOR versions follow MAJOR and MINOR versions of Airflow.\n The first MAJOR or MINOR X.Y.0 release of Airflow should always be followed by X.Y.0 release of\n all clients. An airflow PATCH X.Y.Z release can be followed by a PATCH release of API clients, only\n if this PATCH is relevant to the clients.\n The clients then can release their own PATCH releases with bugfixes, independently of Airflow PATCH releases.\n As a consequence, each API client will have its own PATCH version that may or may not be in sync with the Airflow\n PATCH version. For a specific MAJOR/MINOR Airflow version, users should favor the latest PATCH version of clients\n independently of their Airflow PATCH version.\n\n## Version Life Cycle\n\nApache Airflow version life cycle:\n\n\n\n\n| Version | Current Patch/Minor | State | First Release | Limited Support | EOL/Terminated |\n|-----------|-----------------------|-----------|-----------------|-------------------|------------------|\n| 2 | 2.6.0 | Supported | Dec 17, 2020 | TBD | TBD |\n| 1.10 | 1.10.15 | EOL | Aug 27, 2018 | Dec 17, 2020 | June 17, 2021 |\n| 1.9 | 1.9.0 | EOL | Jan 03, 2018 | Aug 27, 2018 | Aug 27, 2018 |\n| 1.8 | 1.8.2 | EOL | Mar 19, 2017 | Jan 03, 2018 | Jan 03, 2018 |\n| 1.7 | 1.7.1.2 | EOL | Mar 28, 2016 | Mar 19, 2017 | Mar 19, 2017 |\n\n\n\nLimited support versions will be supported with security and critical bug fix only.\nEOL versions will not get any fixes nor support.\nWe always recommend that all users run the latest available minor release for whatever major version is in use.\nWe **highly** recommend upgrading to the latest Airflow major release at the earliest convenient time and before the EOL date.\n\n## Support for Python and Kubernetes versions\n\nAs of Airflow 2.0, we agreed to certain rules we follow for Python and Kubernetes support.\nThey are based on the official release schedule of Python and Kubernetes, nicely summarized in the\n[Python Developer's Guide](https://devguide.python.org/#status-of-python-branches) and\n[Kubernetes version skew policy](https://kubernetes.io/docs/setup/release/version-skew-policy/).\n\n1. We drop support for Python and Kubernetes versions when they reach EOL. Except for Kubernetes, a\n version stays supported by Airflow if two major cloud providers still provide support for it. We drop\n support for those EOL versions in main right after EOL date, and it is effectively removed when we release\n the first new MINOR (Or MAJOR if there is no new MINOR version) of Airflow. For example, for Python 3.7 it\n means that we will drop support in main right after 27.06.2023, and the first MAJOR or MINOR version of\n Airflow released after will not have it.\n\n2. The \"oldest\" supported version of Python/Kubernetes is the default one until we decide to switch to\n later version. \"Default\" is only meaningful in terms of \"smoke tests\" in CI PRs, which are run using this\n default version and the default reference image available. Currently `apache/airflow:latest`\n and `apache/airflow:2.6.0` images are Python 3.7 images. This means that default reference image will\n become the default at the time when we start preparing for dropping 3.7 support which is few months\n before the end of life for Python 3.7.\n\n3. 
We support a new version of Python/Kubernetes in main after they are officially released, as soon as we\n make them work in our CI pipeline (which might not be immediate due to dependencies catching up with\n new versions of Python mostly) we release new images/support in Airflow based on the working CI setup.\n\n## Base OS support for reference Airflow images\n\nThe Airflow Community provides conveniently packaged container images that are published whenever\nwe publish an Apache Airflow release. Those images contain:\n\n* Base OS with necessary packages to install Airflow (stable Debian OS)\n* Base Python installation in versions supported at the time of release for the MINOR version of\n Airflow released (so there could be different versions for 2.3 and 2.2 line for example)\n* Libraries required to connect to supported Databases (again the set of databases supported depends\n on the MINOR version of Airflow.\n* Predefined set of popular providers (for details see the [Dockerfile](https://raw.githubusercontent.com/apache/airflow/main/Dockerfile)).\n* Possibility of building your own, custom image where the user can choose their own set of providers\n and libraries (see [Building the image](https://airflow.apache.org/docs/docker-stack/build.html))\n* In the future Airflow might also support a \"slim\" version without providers nor database clients installed\n\nThe version of the base OS image is the stable version of Debian. Airflow supports using all currently active\nstable versions - as soon as all Airflow dependencies support building, and we set up the CI pipeline for\nbuilding and testing the OS version. Approximately 6 months before the end-of-life of a previous stable\nversion of the OS, Airflow switches the images released to use the latest supported version of the OS.\nFor example since ``Debian Buster`` end-of-life was August 2022, Airflow switched the images in `main` branch\nto use ``Debian Bullseye`` in February/March 2022. The version was used in the next MINOR release after\nthe switch happened. In case of the Bullseye switch - 2.3.0 version used ``Debian Bullseye``.\nThe images released in the previous MINOR version continue to use the version that all other releases\nfor the MINOR version used.\n\nSupport for ``Debian Buster`` image was dropped in August 2022 completely and everyone is expected to\nstop building their images using ``Debian Buster``.\n\nUsers will continue to be able to build their images using stable Debian releases until the end of life and\nbuilding and verifying of the images happens in our CI but no unit tests were executed using this image in\nthe `main` branch.\n\n## Approach to dependencies of Airflow\n\nAirflow has a lot of dependencies - direct and transitive, also Airflow is both - library and application,\ntherefore our policies to dependencies has to include both - stability of installation of application,\nbut also ability to install newer version of dependencies for those users who develop DAGs. We developed\nthe approach where `constraints` are used to make sure airflow can be installed in a repeatable way, while\nwe do not limit our users to upgrade most of the dependencies. 
As a result we decided not to upper-bound\nversion of Airflow dependencies by default, unless we have good reasons to believe upper-bounding them is\nneeded because of importance of the dependency as well as risk it involves to upgrade specific dependency.\nWe also upper-bound the dependencies that we know cause problems.\n\nThe constraint mechanism of ours takes care about finding and upgrading all the non-upper bound dependencies\nautomatically (providing that all the tests pass). Our `main` build failures will indicate in case there\nare versions of dependencies that break our tests - indicating that we should either upper-bind them or\nthat we should fix our code/tests to account for the upstream changes from those dependencies.\n\nWhenever we upper-bound such a dependency, we should always comment why we are doing it - i.e. we should have\na good reason why dependency is upper-bound. And we should also mention what is the condition to remove the\nbinding.\n\n### Approach for dependencies for Airflow Core\n\nThose `extras` and `providers` dependencies are maintained in `setup.cfg`.\n\nThere are few dependencies that we decided are important enough to upper-bound them by default, as they are\nknown to follow predictable versioning scheme, and we know that new versions of those are very likely to\nbring breaking changes. We commit to regularly review and attempt to upgrade to the newer versions of\nthe dependencies as they are released, but this is manual process.\n\nThe important dependencies are:\n\n* `SQLAlchemy`: upper-bound to specific MINOR version (SQLAlchemy is known to remove deprecations and\n introduce breaking changes especially that support for different Databases varies and changes at\n various speed (example: SQLAlchemy 1.4 broke MSSQL integration for Airflow)\n* `Alembic`: it is important to handle our migrations in predictable and performant way. It is developed\n together with SQLAlchemy. Our experience with Alembic is that it very stable in MINOR version\n* `Flask`: We are using Flask as the back-bone of our web UI and API. We know major version of Flask\n are very likely to introduce breaking changes across those so limiting it to MAJOR version makes sense\n* `werkzeug`: the library is known to cause problems in new versions. It is tightly coupled with Flask\n libraries, and we should update them together\n* `celery`: Celery is crucial component of Airflow as it used for CeleryExecutor (and similar). Celery\n [follows SemVer](https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions), so\n we should upper-bound it to the next MAJOR version. Also when we bump the upper version of the library,\n we should make sure Celery Provider minimum Airflow version is updated).\n* `kubernetes`: Kubernetes is a crucial component of Airflow as it is used for the KubernetesExecutor\n (and similar). Kubernetes Python library [follows SemVer](https://github.com/kubernetes-client/python#compatibility),\n so we should upper-bound it to the next MAJOR version. Also when we bump the upper version of the library,\n we should make sure Kubernetes Provider minimum Airflow version is updated.\n\n### Approach for dependencies in Airflow Providers and extras\n\nThe main part of the Airflow is the Airflow Core, but the power of Airflow also comes from a number of\nproviders that extend the core functionality and are released separately, even if we keep them (for now)\nin the same monorepo for convenience. 
You can read more about the providers in the\n[Providers documentation](https://airflow.apache.org/docs/apache-airflow-providers/index.html). We also\nhave set of policies implemented for maintaining and releasing community-managed providers as well\nas the approach for community vs. 3rd party providers in the [providers](PROVIDERS.rst) document.\n\nThose `extras` and `providers` dependencies are maintained in `provider.yaml` of each provider.\n\nBy default, we should not upper-bound dependencies for providers, however each provider's maintainer\nmight decide to add additional limits (and justify them with comment).\n\n## Contributing\n\nWant to help build Apache Airflow? Check out our [contributing documentation](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst).\n\nOfficial Docker (container) images for Apache Airflow are described in [IMAGES.rst](https://github.com/apache/airflow/blob/main/IMAGES.rst).\n\n## Who uses Apache Airflow?\n\nMore than 400 organizations are using Apache Airflow\n[in the wild](https://github.com/apache/airflow/blob/main/INTHEWILD.md).\n\n## Who Maintains Apache Airflow?\n\nAirflow is the work of the [community](https://github.com/apache/airflow/graphs/contributors),\nbut the [core committers/maintainers](https://people.apache.org/committers-by-project.html#airflow)\nare responsible for reviewing and merging PRs as well as steering conversations around new feature requests.\nIf you would like to become a maintainer, please review the Apache Airflow\n[committer requirements](https://github.com/apache/airflow/blob/main/COMMITTERS.rst#guidelines-to-become-an-airflow-committer).\n\n## Can I use the Apache Airflow logo in my presentation?\n\nYes! Be sure to abide by the Apache Foundation [trademark policies](https://www.apache.org/foundation/marks/#books) and the Apache Airflow [Brandbook](https://cwiki.apache.org/confluence/display/AIRFLOW/Brandbook). The most up to date logos are found in [this repo](/docs/apache-airflow/img/logos) and on the Apache Software Foundation [website](https://www.apache.org/logos/about.html).\n\n## Airflow merchandise\n\nIf you would love to have Apache Airflow stickers, t-shirt, etc. then check out\n[Redbubble Shop](https://www.redbubble.com/i/sticker/Apache-Airflow-by-comdev/40497530.EJUG5).\n\n## Links\n\n- [Documentation](https://airflow.apache.org/docs/apache-airflow/stable/)\n- [Chat](https://s.apache.org/airflow-slack)\n\n## Sponsors\n\nThe CI infrastructure for Apache Airflow has been sponsored by:\n\n\n\n\"astronomer.io\"\n\"AWS\n", - "source_links": [], - "id": 4 - }, - { - "page_link": "bring-your-db.md", - "title": "bring-your-db", - "text": "## Connecting to a managed SQL\u00a0instance\n\nWe ship airbyte with the zalando postgres operator's db for persistence by default. This provides a lot of the benefits of a managed postgres instance at a lower cost, but if you'd rather use a familiar service like RDS this is still possible. 
You'll need to do a few things:\n\n### edit context.yaml\n\nAt the root of the repo, edit the `context.yaml` field and set `configuration.airflow.postgresDisabled: true`, this will allow us to reconfigure airflow for bring-your-own-db.\n\n### save the database password to a secret\n\nyou can use a number of methods for this, but simply adding a secret file as `airflow/helm/airflow/templates/db-password.yaml` like:\n\n```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: airflow-db-password\nstringData:\n password: {{ .Values.externalDb.password }}\n```\n\nNote: this password needs to be in the `airflow` namespace. If you put it in our wrapper helm chart, that will be done by default for you.\n\n### modify airflow's helm values.yaml \n\nIf you go to `airflow/helm/airflow/values.yaml` you'll need to provide credentials for postgres. They should look something like:\n\n```yaml\nexternalDb:\n password: \nairflow:\n airflow:\n externalDatabase:\n database: \n host: \n passwordSecret: airflow-db-password\n passwordSecretKey: password\n user: \n port: 5432\n\n # use this for any extra connection-string settings, e.g. ?sslmode=disable\n properties: \"?sslmode=allow\"\n```\n\n### redeploy\n\nFrom there, you should be able to run `plural build --only airflow && plural deploy --commit \"using existing postgres instance\"` to use the managed sql instance", - "source_links": [], - "id": 5 - }, - { - "page_link": "pip-packages.md", - "title": "pip-packages", - "text": "## Installing pip packages\n\nFrequently an airflow project needs more than our default pip setup installed to work fully. Airflow's codebase is brittle, and we recommend you handle pip installs by baking a new docker image against ours and then wiring it into your installation. It's not actually too hard, and we can walk you through it.\n\n### Custom Dockerfile\n\nThe dockerfile for our image is found at https://github.com/pluralsh/containers/tree/main/airflow. You'll also want to keep the `requirements.txt` file adjacent to it. 
Simply move these two wherever you manage docker, add whatever pip packages to `requirements.txt` and push it to your container registry.\n\n### wire airflow to point to new dockerfile\n\nYou'll then want to edit `airflow/helm/airflow/values.yaml` in your installation repo with something like:\n\n```yaml\nairflow:\n airflow:\n airflow:\n image:\n repository: your.docker.repository\n tag: your-tag\n```\n\nAlternatively, you should be able to do this in the configuration section for airflow in your plural console as well.\n\n### redeploy\n\nfrom there you can simply run `plural build --only airflow && plural deploy --commit \"using custom docker image\"` to set this up", - "source_links": [], - "id": 6 - }, - { - "page_link": "https://github.com/argoproj/argo-cd", - "title": "argo-cd readme", - "text": "[![Integration tests](https://github.com/argoproj/argo-cd/workflows/Integration%20tests/badge.svg?branch=master)](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd) [![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4486/badge)](https://bestpractices.coreinfrastructure.org/projects/4486) [![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)\n[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-cd)](https://artifacthub.io/packages/helm/argo/argo-cd)\n\n# Argo CD - Declarative Continuous Delivery for Kubernetes\n\n## What is Argo CD?\n\nArgo CD is a declarative, GitOps continuous delivery tool for Kubernetes.\n\n![Argo CD UI](docs/assets/argocd-ui.gif)\n\n[![Argo CD Demo](https://img.youtube.com/vi/0WAm0y2vLIo/0.jpg)](https://youtu.be/0WAm0y2vLIo)\n\n## Why Argo CD?\n\n1. Application definitions, configurations, and environments should be declarative and version controlled.\n1. Application deployment and lifecycle management should be automated, auditable, and easy to understand.\n\n## Who uses Argo CD?\n\n[Official Argo CD user list](USERS.md)\n\n## Documentation\n\nTo learn more about Argo CD [go to the complete documentation](https://argo-cd.readthedocs.io/).\nCheck live demo at https://cd.apps.argoproj.io/.\n\n## Community\n\n### Contribution, Discussion and Support\n\n You can reach the Argo CD community and developers via the following channels:\n\n* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)\n* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)\n* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)\n* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)\n\n\nParticipation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)\n\n\n### Blogs and Presentations\n\n1. 
[Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)\n1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://blog.akuity.io/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argo-cd-7c5b4057ee49)\n1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)\n1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)\n1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)\n1. [Couchbase - How To Run a Database Cluster in Kubernetes Using Argo CD](https://youtu.be/nkPoPaVzExY)\n1. [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)\n1. [Environments Based On Pull Requests (PRs): Using Argo CD To Apply GitOps Principles On Previews](https://youtu.be/cpAaI8p4R60)\n1. [Argo CD: Applying GitOps Principles To Manage Production Environment In Kubernetes](https://youtu.be/vpWQeoaiRM4)\n1. [Creating Temporary Preview Environments Based On Pull Requests With Argo CD And Codefresh](https://codefresh.io/continuous-deployment/creating-temporary-preview-environments-based-pull-requests-argo-cd-codefresh/)\n1. [Tutorial: Everything You Need To Become A GitOps Ninja](https://www.youtube.com/watch?v=r50tRQjisxw) 90m tutorial on GitOps and Argo CD.\n1. [Comparison of Argo CD, Spinnaker, Jenkins X, and Tekton](https://www.inovex.de/blog/spinnaker-vs-argo-cd-vs-tekton-vs-jenkins-x/)\n1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://www.ibm.com/cloud/blog/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2)\n1. [GitOps for Kubeflow using Argo CD](https://v0-6.kubeflow.org/docs/use-cases/gitops-for-kubeflow/)\n1. [GitOps Toolsets on Kubernetes with CircleCI and Argo CD](https://www.digitalocean.com/community/tutorials/webinar-series-gitops-tool-sets-on-kubernetes-with-circleci-and-argo-cd)\n1. [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be)\n1. [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU). Among other things, describes how Kubeflow uses Argo CD to implement GitOPs for ML\n1. [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s)\n1. [Introduction to Argo CD : Kubernetes DevOps CI/CD](https://www.youtube.com/watch?v=2WSJF7d8dUg&feature=youtu.be)\n1. [GitOps Deployment and Kubernetes - using Argo CD](https://medium.com/riskified-technology/gitops-deployment-and-kubernetes-f1ab289efa4b)\n1. [Deploy Argo CD with Ingress and TLS in Three Steps: No YAML Yak Shaving Required](https://itnext.io/deploy-argo-cd-with-ingress-and-tls-in-three-steps-no-yaml-yak-shaving-required-bc536d401491)\n1. [GitOps Continuous Delivery with Argo and Codefresh](https://codefresh.io/events/cncf-member-webinar-gitops-continuous-delivery-argo-codefresh/)\n1. [Stay up to date with Argo CD and Renovate](https://mjpitz.com/blog/2020/12/03/renovate-your-gitops/)\n1. [Setting up Argo CD with Helm](https://www.arthurkoziel.com/setting-up-argocd-with-helm/)\n1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/)\n1. 
[Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)\n1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)\n1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)\n1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)\n1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)\n\n", - "source_links": [], - "id": 7 - }, - { - "page_link": "https://github.com/argoproj/argo-workflows", - "title": "argo-workflows readme", - "text": "[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)\n[![CI](https://github.com/argoproj/argo-workflows/workflows/CI/badge.svg)](https://github.com/argoproj/argo-workflows/actions?query=event%3Apush+branch%3Amaster)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830)\n[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows)\n[![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)\n\n## What is Argo Workflows?\n\nArgo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo\nWorkflows is implemented as a Kubernetes CRD (Custom Resource Definition).\n\n* Define workflows where each step in the workflow is a container.\n* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic\n graph (DAG).\n* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo\n Workflows on Kubernetes.\n\nArgo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) hosted project.\n\n[![Argo Workflows in 5 minutes](https://img.youtube.com/vi/TZgLkCFQ2tk/0.jpg)](https://www.youtube.com/watch?v=TZgLkCFQ2tk)\n\n## Use Cases\n\n* Machine Learning pipelines\n* Data and batch processing\n* ETL\n* Infrastructure automation\n* CI/CD\n\n## Why Argo Workflows?\n\n* Argo Workflows is the most popular workflow execution engine for Kubernetes.\n* It can run 1000s of workflows a day, each with 1000s of concurrent tasks.\n* Our users say it is lighter-weight, faster, more powerful, and easier to use\n* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based\n environments.\n* Cloud agnostic and can run on any Kubernetes cluster.\n\n[Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-2021-survey-results-d6fa890030ee)\n\n## Try Argo Workflows\n\n[Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo) (login using Github)\n\n![Screenshot](docs/assets/screenshot.png)\n\n## Documentation\n\n[View the docs](https://argoproj.github.io/argo-workflows/)\n\n## Ecosystem\n\nJust some of the projects that use or rely on Argo Workflows:\n\n* [Argo Events](https://github.com/argoproj/argo-events)\n* [Couler](https://github.com/couler-proj/couler)\n* [Katib](https://github.com/kubeflow/katib)\n* [Kedro](https://kedro.readthedocs.io/en/stable/)\n* [Kubeflow 
Pipelines](https://github.com/kubeflow/pipelines)\n* [Netflix Metaflow](https://metaflow.org)\n* [Onepanel](https://www.onepanel.ai/)\n* [Ploomber](https://github.com/ploomber/ploomber)\n* [Seldon](https://github.com/SeldonIO/seldon-core)\n* [SQLFlow](https://github.com/sql-machine-learning/sqlflow)\n* [Orchest](https://github.com/orchest/orchest/)\n\n## Client Libraries\n\nCheck out our [Java, Golang and Python clients](docs/client-libraries.md).\n\n## Quickstart\n\nThe following commands install Argo Workflows as well as some commmonly used components:\n\n```bash\nkubectl create ns argo\nkubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start-postgres.yaml\n```\n\n> **These manifests are intended to help you get started quickly. They contain hard-coded passwords that are publicly available and are not suitable in production.**\n\n## Who uses Argo Workflows?\n\n[Official Argo Workflows user list](USERS.md)\n\n## Documentation\n\n* [Get started here](docs/quick-start.md)\n* [How to write Argo Workflow specs](https://github.com/argoproj/argo-workflows/blob/master/examples/README.md)\n* [How to configure your artifact repository](docs/configure-artifact-repository.md)\n\n## Features\n\n* UI to visualize and manage Workflows\n* Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw)\n* Workflow templating to store commonly used Workflows in the cluster\n* Archiving Workflows after executing for later access\n* Scheduled workflows using cron\n* Server interface with REST API (HTTP and GRPC)\n* DAG or Steps based declaration of workflows\n* Step level input & outputs (artifacts/parameters)\n* Loops\n* Parameterization\n* Conditionals\n* Timeouts (step & workflow level)\n* Retry (step & workflow level)\n* Resubmit (memoized)\n* Suspend & Resume\n* Cancellation\n* K8s resource orchestration\n* Exit Hooks (notifications, cleanup)\n* Garbage collection of completed workflow\n* Scheduling (affinity/tolerations/node selectors)\n* Volumes (ephemeral/existing)\n* Parallelism limits\n* Daemoned steps\n* DinD (docker-in-docker)\n* Script steps\n* Event emission\n* Prometheus metrics\n* Multiple executors\n* Multiple pod and workflow garbage collection strategies\n* Automatically calculated resource usage per step\n* Java/Golang/Python SDKs\n* Pod Disruption Budget support\n* Single-sign on (OAuth2/OIDC)\n* Webhook triggering\n* CLI\n* Out-of-the box and custom Prometheus metrics\n* Windows container support\n* Embedded widgets\n* Multiplex log viewer\n\n## Community Meetings\n\nWe host monthly community meetings where we and the community showcase demos and discuss the current and future state of\nthe project. Feel free to join us! 
For Community Meeting information, minutes and recordings\nplease [see here](https://bit.ly/argo-wf-cmty-mtng).\n\nParticipation in the Argo Workflows project is governed by\nthe [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)\n\n## Community Blogs and Presentations\n\n* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)\n* [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)\n* [Argo Workflows and Pipelines - CI/CD, Machine Learning, and Other Kubernetes Workflows](https://youtu.be/UMaivwrAyTA)\n* [Argo Ansible role: Provisioning Argo Workflows on OpenShift](https://medium.com/@marekermk/provisioning-argo-on-openshift-with-ansible-and-kustomize-340a1fda8b50)\n* [Argo Workflows vs Apache Airflow](http://bit.ly/30YNIvT)\n* [CI/CD with Argo on Kubernetes](https://medium.com/@bouwe.ceunen/ci-cd-with-argo-on-kubernetes-28c1a99616a9)\n* [Running Argo Workflows Across Multiple Kubernetes Clusters](https://admiralty.io/blog/running-argo-workflows-across-multiple-kubernetes-clusters/)\n* [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/)\n* [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)\n* [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html)\n* TGI Kubernetes with Joe Beda: [Argo workflow system](https://www.youtube.com/watch?v=M_rxPPLG8pU&start=859)\n\n## Project Resources\n\n* Argo GitHub: https://github.com/argoproj\n* Argo Website: https://argoproj.github.io/\n* Argo Slack: [click here to join](https://argoproj.github.io/community/join-slack)\n\n## Security\n\nSee [SECURITY.md](SECURITY.md).\n", - "source_links": [], - "id": 8 - }, - { - "page_link": "https://github.com/bram2w/baserow", - "title": "baserow readme", - "text": "## Baserow is an open source no-code database tool and Airtable alternative.\n\nCreate your own online database without technical experience. Our user-friendly no-code\ntool gives you the powers of a developer without leaving your browser.\n\n* A spreadsheet database hybrid combining ease of use and powerful data organization.\n* Easily self-hosted with no storage restrictions or sign-up on https://baserow.io to\n get started immediately.\n* Alternative to Airtable.\n* Open-core with all non-premium and non-enterprise features under\n the [MIT License](https://choosealicense.com/licenses/mit/) allowing commercial and\n private use.\n* Headless and API first.\n* Uses popular frameworks and tools like [Django](https://www.djangoproject.com/),\n [Vue.js](https://vuejs.org/) and [PostgreSQL](https://www.postgresql.org/).\n\n[![Deploy to Heroku](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/bram2w/baserow/tree/master)\n\n```bash\ndocker run -v baserow_data:/baserow/data -p 80:80 -p 443:443 baserow/baserow:1.16.0\n```\n\n![Baserow screenshot](docs/assets/screenshot.png \"Baserow screenshot\")\n\n## Get Involved\n\n**We're hiring remotely**! More information at https://baserow.io/jobs.\n\nJoin our forum on https://community.baserow.io/ or on Gitter via\nhttps://gitter.im/bramw-baserow/community. 
See [CONTRIBUTING.md](./CONTRIBUTING.md) on\nhow to become a contributor.\n\n## Installation\n\n* [**Docker**](docs/installation/install-with-docker.md)\n* [**Ubuntu**](docs/installation/install-on-ubuntu.md)\n* [**Docker Compose** ](docs/installation/install-with-docker-compose.md)\n* [**\n Heroku**: Easily install and scale up Baserow on Heroku.](docs/installation/install-on-heroku.md)\n* [**\n Render**: Easily install and scale up Baserow on Render.](docs/installation/install-on-render.md)\n* [**\n Cloudron**: Install and update Baserow on your own Cloudron server.](docs/installation/install-on-cloudron.md)\n\n## Official documentation\n\nThe official documentation can be found on the website at https://baserow.io/docs/index\nor [here](./docs/index.md) inside the repository. The API docs can be found here at\nhttps://api.baserow.io/api/redoc/ or if you are looking for the OpenAPI schema here\nhttps://api.baserow.io/api/schema.json.\n\n## Become a sponsor\n\nIf you would like to get new features faster, then you might want to consider becoming a\nsponsor. By becoming a sponsor we can spend more time on Baserow which means faster\ndevelopment.\n\n[Become a GitHub Sponsor](https://github.com/sponsors/bram2w)\n\n## Development environment\n\nIf you want to contribute to Baserow you can setup a development environment like so:\n\n```\n$ git clone https://gitlab.com/bramw/baserow.git\n$ cd baserow\n$ ./dev.sh --build\n```\n\nThe Baserow development environment is now running.\nVisit [http://localhost:3000](http://localhost:3000) in your browser to see a working\nversion in development mode with hot code reloading and other dev features enabled.\n\nMore detailed instructions and more information about the development environment can be\nfound\nat [https://baserow.io/docs/development/development-environment](./docs/development/development-environment.md)\n.\n\n## Plugin development\n\nBecause of the modular architecture of Baserow it is possible to create plugins. Make\nyour own fields, views, applications, pages or endpoints. We also have a plugin\nboilerplate to get you started right away. More information can be found in the\n[plugin introduction](./docs/plugins/introduction.md) and in the\n[plugin boilerplate docs](./docs/plugins/boilerplate.md).\n\n## Meta\n\nCreated by Baserow B.V. - bram@baserow.io.\n\nDistributes under the MIT license. See `LICENSE` for more information.\n\nVersion: 1.16.0\n\nThe official repository can be found at https://gitlab.com/bramw/baserow.\n\nThe changelog can be found [here](./changelog.md).\n\nBecome a GitHub Sponsor [here](https://github.com/sponsors/bram2w).\n\nCommunity chat via https://gitter.im/bramw-baserow/community.\n", - "source_links": [], - "id": 9 - }, - { - "page_link": null, - "title": "bootstrap readme", - "text": null, - "source_links": [], - "id": 10 - }, - { - "page_link": "overview.md", - "title": "overview", - "text": "hello world", - "source_links": [], - "id": 11 - }, - { - "page_link": null, - "title": "bootstrap-cluster-api readme", - "text": null, - "source_links": [], - "id": 12 - }, - { - "page_link": null, - "title": "bytebase readme", - "text": null, - "source_links": [], - "id": 13 - }, - { - "page_link": "https://github.com/calcom/cal.com", - "title": "calendso readme", - "text": "\n

\n[Cal.com logo]\n\nCal.com (formerly Calendso)\n\nThe open-source Calendly alternative.\n\nLearn more \u00bb\n\nSlack \u00b7 Website \u00b7 Issues \u00b7 Roadmap\n\n[badge images: Join, Product, Uptime, Github, Hacker, License, Commits-per-month, Pricing, Jitsu, Checkly]
\n\n\n\n## About The Project\n\n\"booking-screen\"\n\n# Scheduling infrastructure for absolutely everyone\n\nThe open source Calendly alternative. You are in charge\nof your own data, workflow and appearance.\n\nCalendly and other scheduling tools are awesome. It made our lives massively easier. We're using it for business meetings, seminars, yoga classes and even calls with our families. However, most tools are very limited in terms of control and customisations.\n\nThat's where Cal.com comes in. Self-hosted or hosted by us. White-label by design. API-driven and ready to be deployed on your own domain. Full control of your events and data.\n\n## Product of the Month: April\n\n#### Support us on [Product Hunt](https://www.producthunt.com/posts/calendso?utm_source=badge-top-post-badge&utm_medium=badge&utm_souce=badge-calendso)\n\n\"Cal.com \"Cal.com \"Cal.com\n\n \n\n\n### Built With\n\n- [Next.js](https://nextjs.org/)\n- [React](https://reactjs.org/)\n- [Tailwind](https://tailwindcss.com/)\n- [Prisma](https://prisma.io/)\n\n## Stay Up-to-Date\n\nCal officially launched as v.1.0 on 15th of September, however a lot of new features are coming. Watch **releases** of this repository to be notified for future updates:\n\n![cal-star-github](https://user-images.githubusercontent.com/8019099/154853944-a9e3c999-3da3-4048-b149-b4f73893c6fb.gif)\n\n\n\n## Getting Started\n\nTo get a local copy up and running, please follow these simple steps.\n\n### Prerequisites\n\nHere is what you need to be able to run Cal.\n\n- Node.js (Version: >=15.x <17)\n- PostgreSQL\n- Yarn _(recommended)_\n\n> If you want to enable any of the available integrations, you may want to obtain additional credentials for each one. More details on this can be found below under the [integrations section](#integrations).\n\n## Development\n\n### Setup\n\n1. Clone the repo into a public GitHub repository (or fork https://github.com/calcom/cal.com/fork). If you plan to distribute the code, keep the source code public to comply with [AGPLv3](https://github.com/calcom/cal.com/blob/main/LICENSE). To clone in a private repository, [acquire a commercial license](https://cal.com/sales))\n\n ```sh\n git clone https://github.com/calcom/cal.com.git\n ```\n\n1. Go to the project folder\n\n ```sh\n cd cal.com\n ```\n\n1. Install packages with yarn\n\n ```sh\n yarn\n ```\n\n1. Use `openssl rand -base64 32` to generate a key and add it under `NEXTAUTH_SECRET` in the .env file.\n\n#### Quick start with `yarn dx`\n\n> - **Requires Docker and Docker Compose to be installed**\n> - Will start a local Postgres instance with a few test users - the credentials will be logged in the console\n\n```sh\nyarn dx\n```\n\n#### Development tip\n\n> Add `NEXT_PUBLIC_DEBUG=1` anywhere in your `.env` to get logging information for all the queries and mutations driven by **trpc**.\n\n```sh\necho 'NEXT_PUBLIC_DEBUG=1' >> .env\n```\n\n#### Manual setup\n\n1. Configure environment variables in the `packages/prisma/.env` file. Replace ``, ``, ``, `` with their applicable values\n\n ```\n DATABASE_URL='postgresql://:@:'\n ```\n\n
\n If you don't know how to configure the DATABASE_URL, then follow the steps here to create a quick DB using Heroku\n\n 1. Create a free account with [Heroku](https://www.heroku.com/).\n\n 2. Create a new app.\n \"Create\n\n 3. In your new app, go to `Overview` and next to `Installed add-ons`, click `Configure Add-ons`. We need this to set up our database.\n ![image](https://user-images.githubusercontent.com/16905768/115323232-a53ba480-a17f-11eb-98db-58e2f8c52426.png)\n\n 4. Once you clicked on `Configure Add-ons`, click on `Find more add-ons` and search for `postgres`. One of the options will be `Heroku Postgres` - click on that option.\n ![image](https://user-images.githubusercontent.com/16905768/115323126-5beb5500-a17f-11eb-8030-7380310807a9.png)\n\n 5. Once the pop-up appears, click `Submit Order Form` - plan name should be `Hobby Dev - Free`.\n \"Submit\n\n 6. Once you completed the above steps, click on your newly created `Heroku Postgres` and go to its `Settings`.\n ![image](https://user-images.githubusercontent.com/16905768/115323367-e92ea980-a17f-11eb-9ff4-dec95f2ec349.png)\n\n 7. In `Settings`, copy your URI to your Cal.com .env file and replace the `postgresql://:@:` with it.\n ![image](https://user-images.githubusercontent.com/16905768/115323556-4591c900-a180-11eb-9808-2f55d2aa3995.png)\n ![image](https://user-images.githubusercontent.com/16905768/115323697-7a9e1b80-a180-11eb-9f08-a742b1037f90.png)\n\n 8. To view your DB, once you add new data in Prisma, you can use [Heroku Data Explorer](https://heroku-data-explorer.herokuapp.com/).\n
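\n   As an illustration only (the host, port, database name, and credentials below are hypothetical placeholders, not values taken from this guide), a filled-in connection string could look like:\n\n   ```\n   # packages/prisma/.env (example values only, replace with your own)\n   DATABASE_URL='postgresql://postgres:mysecretpassword@localhost:5432/calendso'\n   ```\n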
\n\n1. Set a 32 character random string in your .env file for the `CALENDSO_ENCRYPTION_KEY` (You can use a command like `openssl rand -base64 24` to generate one).\n1. Set up the database using the Prisma schema (found in `packages/prisma/schema.prisma`)\n\n ```sh\n yarn workspace @calcom/prisma db-deploy\n ```\n\n1. Run (in development mode)\n\n ```sh\n yarn dev\n ```\n\n#### Setting up your first user\n\n1. Open [Prisma Studio](https://www.prisma.io/studio) to look at or modify the database content:\n\n ```sh\n yarn db-studio\n ```\n\n1. Click on the `User` model to add a new user record.\n1. Fill out the fields `email`, `username`, `password`, and set `metadata` to empty `{}` (remembering to encrypt your password with [BCrypt](https://bcrypt-generator.com/)) and click `Save 1 Record` to create your first user.\n > New users are set on a `TRIAL` plan by default. You might want to adjust this behavior to your needs in the `packages/prisma/schema.prisma` file.\n1. Open a browser to [http://localhost:3000](http://localhost:3000) and login with your just created, first user.\n\n### E2E-Testing\n\nBe sure to set the environment variable `NEXTAUTH_URL` to the correct value. If you are running locally, as the documentation within `.env.example` mentions, the value should be `http://localhost:3000`.\n\n```sh\n# In a terminal just run:\nyarn test-e2e\n\n# To open last HTML report run:\nyarn workspace @calcom/web playwright-report\n```\n\n### Upgrading from earlier versions\n\n1. Pull the current version:\n\n ```sh\n git pull\n ```\n\n1. Check if dependencies got added/updated/removed\n\n ```sh\n yarn\n ```\n\n1. Apply database migrations by running one of the following commands:\n\n In a development environment, run:\n\n ```sh\n yarn workspace @calcom/prisma db-migrate\n ```\n\n (this can clear your development database in some cases)\n\n In a production environment, run:\n\n ```sh\n yarn workspace @calcom/prisma db-deploy\n ```\n\n1. Check for `.env` variables changes\n\n ```sh\n yarn predev\n ```\n\n1. Start the server. In a development environment, just do:\n\n ```sh\n yarn dev\n ```\n\n For a production build, run for example:\n\n ```sh\n yarn build\n yarn start\n ```\n\n1. Enjoy the new version.\n\n\n## Deployment\n\n### Docker\n\nThe Docker configuration for Cal is an effort powered by people within the community. Cal.com, Inc. does not provide official support for Docker, but we will accept fixes and documentation. Use at your own risk.\n\nIf you want to contribute to the Docker repository, [reply here](https://github.com/calcom/docker/discussions/32).\n\nThe Docker configuration can be found [in our docker repository](https://github.com/calcom/docker).\n\n### Heroku\n\n\n \"Deploy\"\n\n\n### Railway\n\n[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/new/template?template=https%3A%2F%2Fgithub.com%2Fcalendso%2Fcalendso&plugins=postgresql&envs=GOOGLE_API_CREDENTIALS%2CBASE_URL%2CNEXTAUTH_URL%2CPORT&BASE_URLDefault=http%3A%2F%2Flocalhost%3A3000&NEXTAUTH_URLDefault=http%3A%2F%2Flocalhost%3A3000&PORTDefault=3000)\n\nYou can deploy Cal on [Railway](https://railway.app/) using the button above. 
The team at Railway also have a [detailed blog post](https://blog.railway.app/p/calendso) on deploying Cal on their platform.\n\n### Vercel\n\nCurrently Vercel Pro Plan is required to be able to Deploy this application with Vercel, due to limitations on the number of serverless functions on the free plan.\n\n[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fcalcom%2Fcal.com&env=DATABASE_URL,NEXT_PUBLIC_WEBAPP_URL,NEXTAUTH_URL,NEXTAUTH_SECRET,CRON_API_KEY,CALENDSO_ENCRYPTION_KEY,NEXT_PUBLIC_LICENSE_CONSENT&envDescription=See%20all%20available%20env%20vars&envLink=https%3A%2F%2Fgithub.com%2Fcalcom%2Fcal.com%2Fblob%2Fmain%2F.env.example&project-name=cal&repo-name=cal.com&build-command=cd%20../..%20%26%26%20yarn%20build&root-directory=apps%2Fweb%2F)\n\n\n\n## Roadmap\n\n\"Cal.com\n\nSee the [roadmap project](https://cal.com/roadmap) for a list of proposed features (and known issues). You can change the view to see planned tagged releases.\n\n\n\n## Contributing\n\nPlease see our [contributing guide](/CONTRIBUTING.md).\n\n### Good First Issues\n\nWe have a list of [help wanted](https://github.com/orgs/calcom/projects/1/views/25) that contain small features and bugs which have a relatively limited scope. This is a great place to get started, gain experience, and get familiar with our contribution process.\n\n## Integrations\n\n### Obtaining the Google API Credentials\n\n1. Open [Google API Console](https://console.cloud.google.com/apis/dashboard). If you don't have a project in your Google Cloud subscription, you'll need to create one before proceeding further. Under Dashboard pane, select Enable APIS and Services.\n2. In the search box, type calendar and select the Google Calendar API search result.\n3. Enable the selected API.\n4. Next, go to the [OAuth consent screen](https://console.cloud.google.com/apis/credentials/consent) from the side pane. Select the app type (Internal or External) and enter the basic app details on the first page.\n5. In the second page on Scopes, select Add or Remove Scopes. Search for Calendar.event and select the scope with scope value `.../auth/calendar.events`, `.../auth/calendar.readonly` and select Update.\n6. In the third page (Test Users), add the Google account(s) you'll using. Make sure the details are correct on the last page of the wizard and your consent screen will be configured.\n7. Now select [Credentials](https://console.cloud.google.com/apis/credentials) from the side pane and then select Create Credentials. Select the OAuth Client ID option.\n8. Select Web Application as the Application Type.\n9. Under Authorized redirect URI's, select Add URI and then add the URI `/api/integrations/googlecalendar/callback` replacing Cal.com URL with the URI at which your application runs.\n10. The key will be created and you will be redirected back to the Credentials page. Select the newly generated client ID under OAuth 2.0 Client IDs.\n11. Select Download JSON. Copy the contents of this file and paste the entire JSON string in the .env file as the value for GOOGLE_API_CREDENTIALS key.\n\n#### *Adding google calendar to Cal.com App Store*\n\nAfter adding Google credentials, you can now Google Calendar App to the app store.\nYou can repopulate the App store by running\n\n```\ncd packages/prisma\nyarn seed-app-store\n```\n\nYou will need to complete a few more steps to activate Google Calendar App.\nMake sure to complete section \"Obtaining the Google API Credentials\". 
After the do the\nfollowing\n\n1. Add extra redirect URL `/api/auth/callback/google`\n1. Under 'OAuth concent screen', click \"PUBLISH APP\"\n### Obtaining Microsoft Graph Client ID and Secret\n\n1. Open [Azure App Registration](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) and select New registration\n2. Name your application\n3. Set **Who can use this application or access this API?** to **Accounts in any organizational directory (Any Azure AD directory - Multitenant)**\n4. Set the **Web** redirect URI to `/api/integrations/office365calendar/callback` replacing Cal.com URL with the URI at which your application runs.\n5. Use **Application (client) ID** as the **MS_GRAPH_CLIENT_ID** attribute value in .env\n6. Click **Certificates & secrets** create a new client secret and use the value as the **MS_GRAPH_CLIENT_SECRET** attribute\n\n### Obtaining Slack Client ID and Secret and Signing Secret\n\nTo test this you will need to create a Slack app for yourself on [their apps website](https://api.slack.com/apps).\n\nCopy and paste the app manifest below into the setting on your slack app. Be sure to replace `YOUR_DOMAIN` with your own domain or your proxy host if you're testing locally.\n\n
\n App Manifest\n \n ```yaml\n display_information:\n name: Cal.com Slack\nfeatures:\n bot_user:\n display_name: Cal.com Slack\n always_online: false\n slash_commands:\n - command: /create-event\n url: https://YOUR_DOMAIN/api/integrations/slackmessaging/commandHandler\n description: Create an event within Cal!\n should_escape: false\n - command: /today\n url: https://YOUR_DOMAIN/api/integrations/slackmessaging/commandHandler\n description: View all your bookings for today\n should_escape: false\noauth_config:\n redirect_urls:\n - https://YOUR_DOMAIN/api/integrations/slackmessaging/callback\n scopes:\n bot:\n - chat:write\n - commands\n - chat:write.public \nsettings:\n interactivity:\n is_enabled: true\n request_url: https://YOUR_DOMAIN/api/integrations/slackmessaging/interactiveHandler\n message_menu_options_url: https://YOUR_DOMAIN/api/integrations/slackmessaging/interactiveHandler\n org_deploy_enabled: false\n socket_mode_enabled: false\n token_rotation_enabled: false\n```\n\n
\n\nAdd the integration as normal - slack app - add. Follow the oauth flow to add it to a server.\n\nNext make sure you have your app running `yarn dx`. Then in the slack chat type one of these commands: `/create-event` or `/today`\n\n> NOTE: Next you will need to setup a proxy server like [ngrok](https://ngrok.com/) to allow your local host machine to be hosted on a public https server.\n\n### Obtaining Zoom Client ID and Secret\n\n1. Open [Zoom Marketplace](https://marketplace.zoom.us/) and sign in with your Zoom account.\n2. On the upper right, click \"Develop\" => \"Build App\".\n3. On \"OAuth\", select \"Create\".\n4. Name your App.\n5. Choose \"User-managed app\" as the app type.\n6. De-select the option to publish the app on the Zoom App Marketplace.\n7. Click \"Create\".\n8. Now copy the Client ID and Client Secret to your .env file into the `ZOOM_CLIENT_ID` and `ZOOM_CLIENT_SECRET` fields.\n9. Set the Redirect URL for OAuth `/api/integrations/zoomvideo/callback` replacing Cal.com URL with the URI at which your application runs.\n10. Also add the redirect URL given above as a allow list URL and enable \"Subdomain check\". Make sure, it says \"saved\" below the form.\n11. You don't need to provide basic information about your app. Instead click at \"Scopes\" and then at \"+ Add Scopes\". On the left, click the category \"Meeting\" and check the scope `meeting:write`.\n12. Click \"Done\".\n13. You're good to go. Now you can easily add your Zoom integration in the Cal.com settings.\n\n### Obtaining Daily API Credentials\n\n1. Open [Daily](https://www.daily.co/) and sign into your account.\n2. From within your dashboard, go to the [developers](https://dashboard.daily.co/developers) tab.\n3. Copy your API key.\n4. Now paste the API key to your .env file into the `DAILY_API_KEY` field in your .env file.\n5. If you have the [Daily Scale Plan](https://www.daily.co/pricing) set the `DAILY_SCALE_PLAN` variable to `true` in order to use features like video recording.\n\n### Obtaining HubSpot Client ID and Secret\n\n1. Open [HubSpot Developer](https://developer.hubspot.com/) and sign into your account, or create a new one.\n2. From within the home of the Developer account page, go to \"Manage apps\".\n3. Click \"Create app\" button top right.\n4. Fill in any information you want in the \"App info\" tab\n5. Go to tab \"Auth\"\n6. Now copy the Client ID and Client Secret to your .env file into the `HUBSPOT_CLIENT_ID` and `HUBSPOT_CLIENT_SECRET` fields.\n7. Set the Redirect URL for OAuth `/api/integrations/hubspotothercalendar/callback` replacing Cal.com URL with the URI at which your application runs.\n8. In the \"Scopes\" section at the bottom of the page, make sure you select \"Read\" and \"Write\" for scope called `crm.objects.contacts`\n9. Click the \"Save\" button at the bottom footer.\n10. You're good to go. Now you can see any booking in Cal.com created as a meeting in HubSpot for your contacts.\n\n### Obtaining Vital API Keys\n\n1. Open [Vital](https://tryvital.io/) and click Get API Keys.\n1. Create a team with the team name you desire\n1. Head to the configuration section on the sidebar of the dashboard\n1. Click on API keys and you'll find your sandbox `api_key`.\n1. Copy your `api_key` to `VITAL_API_KEY` in the .env.appStore file.\n1. Open [Vital Webhooks](https://app.tryvital.io/team/{team_id}/webhooks) and add `/api/integrations/vital/webhook` as webhook for connected applications.\n1. Select all events for the webhook you interested, e.g. `sleep_created`\n1. 
Copy the webhook secret (`sec...`) to `VITAL_WEBHOOK_SECRET` in the .env.appStore file.\n\n## Workflows\n\n### Setting up SendGrid for Email reminders\n\n1. Create a SendGrid account (https://signup.sendgrid.com/)\n2. Go to Settings -> API keys and create an API key\n3. Copy API key to your .env file into the SENDGRID_API_KEY field\n4. Go to Settings -> Sender Authentication and verify a single sender\n5. Copy the verified E-Mail to your .env file into the SENDGRID_EMAIL field\n\n### Setting up Twilio for SMS reminders\n\n1. Create a Twilio account (https://www.twilio.com/try-twilio)\n2. Click \u2018Get a Twilio phone number\u2019\n3. Copy Account SID to your .env file into the TWILIO_SID field\n4. Copy Auth Token to your .env file into the TWILIO_TOKEN field\n5. Create a messaging service (Develop -> Messaging -> Services)\n6. Choose any name for the messaging service\n7. Click 'Add Senders'\n8. Choose phone number as sender type\n9. Add the listed phone number\n10. Leave all other fields as they are\n11. Complete setup and click \u2018View my new Messaging Service\u2019\n12. Copy Messaging Service SID to your .env file into the TWILIO_MESSAGING_SID field\n\n\n\n## License\n\nDistributed under the AGPLv3 License. See `LICENSE` for more information.\n\n\n\n## Acknowledgements\n\nSpecial thanks to these amazing projects which help power Cal.com:\n\n[](https://vercel.com/?utm_source=calend-so&utm_campaign=oss)\n\n- [Vercel](https://vercel.com/?utm_source=calend-so&utm_campaign=oss)\n- [Next.js](https://nextjs.org/)\n- [Day.js](https://day.js.org/)\n- [Tailwind CSS](https://tailwindcss.com/)\n- [Prisma](https://prisma.io/)\n\n\"Jitsu.com\"\n\nCal.com is an [open startup](https://cal.com/open) and [Jitsu](https://github.com/jitsucom/jitsu) (an open-source Segment alternative) helps us to track most of the usage metrics.\n", - "source_links": [], - "id": 14 - }, - { - "page_link": "https://github.com/chatwoot/chatwoot", - "title": "chatwoot readme", - "text": "

\n[Woot logo]\n\nCustomer engagement suite, an open-source alternative to Intercom, Zendesk, Salesforce Service Cloud etc.\n\n[deploy buttons: Heroku, DigitalOcean]\n\n___\n\n[badge images: Maintainability, CircleCI, Docker, Commits-per-month, Discord, Huntr, uptime, response, Artifact Hub]
\n\n\"Chat\n\n\n\nChatwoot is an open-source, self-hosted customer engagement suite. Chatwoot lets you view and manage your customer data, communicate with them irrespective of which medium they use, and re-engage them based on their profile.\n\n## Features\n\nChatwoot supports the following conversation channels:\n\n - **Website**: Talk to your customers using our live chat widget and make use of our SDK to identify a user and provide contextual support.\n - **Facebook**: Connect your Facebook pages and start replying to the direct messages to your page.\n - **Instagram**: Connect your Instagram profile and start replying to the direct messages.\n - **Twitter**: Connect your Twitter profiles and reply to direct messages or the tweets where you are mentioned.\n - **Telegram**: Connect your Telegram bot and reply to your customers right from a single dashboard.\n - **WhatsApp**: Connect your WhatsApp business account and manage the conversation in Chatwoot.\n - **Line**: Connect your Line account and manage the conversations in Chatwoot.\n - **SMS**: Connect your Twilio SMS account and reply to the SMS queries in Chatwoot.\n - **API Channel**: Build custom communication channels using our API channel.\n - **Email**: Forward all your email queries to Chatwoot and view it in our integrated dashboard.\n\nAnd more.\n\nOther features include:\n\n- **CRM**: Save all your customer information right inside Chatwoot, use contact notes to log emails, phone calls, or meeting notes.\n- **Custom Attributes**: Define custom attribute attributes to store information about a contact or a conversation and extend the product to match your workflow.\n- **Shared multi-brand inboxes**: Manage multiple brands or pages using a shared inbox.\n- **Private notes**: Use @mentions and private notes to communicate internally about a conversation.\n- **Canned responses (Saved replies)**: Improve the response rate by adding saved replies for frequently asked questions.\n- **Conversation Labels**: Use conversation labels to create custom workflows.\n- **Auto assignment**: Chatwoot intelligently assigns a ticket to the agents who have access to the inbox depending on their availability and load.\n- **Conversation continuity**: If the user has provided an email address through the chat widget, Chatwoot will send an email to the customer under the agent name so that the user can continue the conversation over the email.\n- **Multi-lingual support**: Chatwoot supports 10+ languages.\n- **Powerful API & Webhooks**: Extend the capability of the software using Chatwoot\u2019s webhooks and APIs.\n- **Integrations**: Chatwoot natively integrates with Slack right now. Manage your conversations in Slack without logging into the dashboard.\n\n## Documentation\n\nDetailed documentation is available at [chatwoot.com/help-center](https://www.chatwoot.com/help-center).\n\n## Translation process\n\nThe translation process for Chatwoot web and mobile app is managed at [https://translate.chatwoot.com](https://translate.chatwoot.com) using Crowdin. Please read the [translation guide](https://www.chatwoot.com/docs/contributing/translating-chatwoot-to-your-language) for contributing to Chatwoot.\n\n## Branching model\n\nWe use the [git-flow](https://nvie.com/posts/a-successful-git-branching-model/) branching model. The base branch is `develop`.\nIf you are looking for a stable version, please use the `master` or tags labelled as `v1.x.x`.\n\n## Deployment\n\n### Heroku one-click deploy\n\nDeploying Chatwoot to Heroku is a breeze. 
It's as simple as clicking this button:\n\n[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/chatwoot/chatwoot/tree/master)\n\nFollow this [link](https://www.chatwoot.com/docs/environment-variables) to understand setting the correct environment variables for the app to work with all the features. There might be breakages if you do not set the relevant environment variables.\n\n\n### DigitalOcean 1-Click Kubernetes deployment\n\nChatwoot now supports 1-Click deployment to DigitalOcean as a kubernetes app.\n\n\n \"Deploy\n\n\n### Other deployment options\n\nFor other supported options, checkout our [deployment page](https://chatwoot.com/deploy). \n\n## Security\n\nLooking to report a vulnerability? Please refer our [SECURITY.md](./SECURITY.md) file.\n\n\n## Community? Questions? Support ?\n\nIf you need help or just want to hang out, come, say hi on our [Discord](https://discord.gg/cJXdrwS) server.\n\n\n## Contributors \u2728\n\nThanks goes to all these [wonderful people](https://www.chatwoot.com/docs/contributors):\n\n\n\n\n*Chatwoot* © 2017-2022, Chatwoot Inc - Released under the MIT License.\n", - "source_links": [], - "id": 15 - }, - { - "page_link": "https://github.com/Altinity/clickhouse-operator", - "title": "clickhouse readme", - "text": "# Altinity Operator for ClickHouse\n\nOperator creates, configures and manages ClickHouse clusters running on Kubernetes.\n\n[![Build Master](https://github.com/Altinity/clickhouse-operator/actions/workflows/build_master.yaml/badge.svg)](https://github.com/Altinity/clickhouse-operator/actions/workflows/build_master.yaml)\n[![GitHub release](https://img.shields.io/github/v/release/altinity/clickhouse-operator?include_prereleases)](https://img.shields.io/github/v/release/altinity/clickhouse-operator?include_prereleases)\n[![tags](https://img.shields.io/github/tag/altinity/clickhouse-operator.svg)](https://github.com/altinity/clickhouse-operator/tags)\n[![Docker Pulls](https://img.shields.io/docker/pulls/altinity/clickhouse-operator.svg)](https://hub.docker.com/r/altinity/clickhouse-operator)\n[![Go version](https://img.shields.io/github/go-mod/go-version/altinity/clickhouse-operator)](https://img.shields.io/github/go-mod/go-version/altinity/clickhouse-operator)\n[![Go Report Card](https://goreportcard.com/badge/github.com/altinity/clickhouse-operator)](https://goreportcard.com/report/github.com/altinity/clickhouse-operator)\n[![issues](https://img.shields.io/github/issues/altinity/clickhouse-operator.svg)](https://github.com/altinity/clickhouse-operator/issues)\n\n## Features\n\n- Creates ClickHouse clusters defined as custom resources\n- Customized storage provisioning (VolumeClaim templates)\n- Customized pod templates\n- Customized service templates for endpoints\n- ClickHouse configuration management\n- ClickHouse users management\n- ClickHouse cluster scaling including automatic schema propagation\n- ClickHouse version upgrades\n- Exporting ClickHouse metrics to Prometheus\n\n## Requirements\n\n * Kubernetes 1.15.11+\n \n## Documentation\n\n[Quick Start Guide][quick_start_guide]\n\n**Advanced configuration**\n * [Detailed Operator Installation Instructions][detailed_installation_instructions]\n * [Operator Configuration][operator_configuration]\n * [Setup ClickHouse cluster with replication][replication_setup]\n * [Setting up Zookeeper][zookeeper_setup]\n * [Persistent Storage Configuration][storage_configuration]\n * [Security Hardening][security_hardening]\n * [ClickHouse Installation 
Custom Resource specification][crd_explained]\n \n**Maintenance tasks**\n * [Add replication to an existing ClickHouse cluster][update_cluster_add_replication]\n * [Schema maintenance][schema_migration]\n * [Update ClickHouse version][update_clickhouse_version]\n * [Update Operator version][update_operator]\n\n**Monitoring**\n * [Setup Monitoring][monitoring_setup]\n * [Prometheus & clickhouse-operator integration][prometheus_setup]\n * [Grafana & Prometheus integration][grafana_setup]\n\n**How to contribute**\n * [How to contribute/submit a patch][contributing_manual]\n * [How to easy development process with devspace.sh][devspace_manual]\n \n---\n * [Documentation index][all_docs_list]\n---\n \n## License\n\nCopyright (c) 2019-2023, Altinity Inc and/or its affiliates. All rights reserved.\n\nAltinity Operator for ClickHouse is licensed under the Apache License 2.0.\n\nSee [LICENSE](./LICENSE) for more details.\n \n[chi_max_yaml]: ./docs/chi-examples/99-clickhouseinstallation-max.yaml\n[intro]: ./docs/introduction.md\n[quick_start_guide]: ./docs/quick_start.md\n[detailed_installation_instructions]: ./docs/operator_installation_details.md\n[replication_setup]: ./docs/replication_setup.md\n[crd_explained]: ./docs/custom_resource_explained.md\n[zookeeper_setup]: ./docs/zookeeper_setup.md\n[monitoring_setup]: ./docs/monitoring_setup.md\n[prometheus_setup]: ./docs/prometheus_setup.md\n[grafana_setup]: ./docs/grafana_setup.md\n[storage_configuration]: ./docs/storage.md\n[update_cluster_add_replication]: ./docs/chi_update_add_replication.md\n[update_clickhouse_version]: ./docs/chi_update_clickhouse_version.md\n[update_operator]: ./docs/operator_upgrade.md\n[schema_migration]: ./docs/schema_migration.md\n[operator_configuration]: ./docs/operator_configuration.md\n[contributing_manual]: ./CONTRIBUTING.md\n[devspace_manual]: ./docs/devspace.md\n[all_docs_list]: ./docs/README.md\n[security_hardening]: ./docs/security_hardening.md\n", - "source_links": [], - "id": 16 - }, - { - "page_link": "exposing-via-loadbalancer.md", - "title": "exposing-via-loadbalancer", - "text": "# Expose your clickhouse installation via a load balancer\n\nYou can opt into using a load balancer service to expose your clickhouse installation. This can be especially useful if you'd like services outside your k8s cluster to access it. 
It requires a minor change to your ClickhouseInstallation crd, like so:\n\n```yaml\napiVersion: \"clickhouse.altinity.com/v1\"\nkind: \"ClickHouseInstallation\"\nmetadata:\n \n # your clickhouse metadata\n\nspec:\n \n # additional clickhouse spec\n\n templates:\n serviceTemplates:\n - name: chi-service-template\n generateName: \"service-{chi}\"\n # type ObjectMeta struct from k8s.io/meta/v1\n metadata:\n labels:\n custom.label: \"custom.value\"\n annotations:\n # For more details on Internal Load Balancer check\n # https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer\n external-dns.alpha.kubernetes.io/hostname: \"your.dns.name\" # NOTE: this should be under the domain specified in `workspace.yaml`\n\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing # can also be \"internal\" if you'd rather it be only accessible w/in your vpc\n service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp\n service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'\n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: \"*\"\n\n ## aws and gcp annotations to opt into private network load balancing\n networking.gke.io/load-balancer-type: \"Internal\"\n service.beta.kubernetes.io/azure-load-balancer-internal: \"true\"\n\n # NLB Load Balancer\n service.beta.kubernetes.io/aws-load-balancer-type: \"nlb\"\n # type ServiceSpec struct from k8s.io/core/v1\n spec:\n ports:\n - name: http\n port: 8123\n - name: tcp\n port: 9000\n type: LoadBalancer\n```\n\n", - "source_links": [], - "id": 17 - }, - { - "page_link": "setup.md", - "title": "setup", - "text": "# Setting Up Your first clickhouse instance\n\nThis deploys a kubernetes operator which allows you to proivision as many clickhouse instances as you see fit. You'll need to create your instance of its crd, which you can add to `clickhouse/helm/clickhouse/templates`. Here's an example yaml file to get you started:\n\n```yaml\napiVersion: \"clickhouse.altinity.com/v1\"\nkind: \"ClickHouseInstallation\"\nmetadata:\n name: \"clickhouse\"\nspec:\n configuration:\n clusters:\n - name: \"cluster\"\n layout:\n shardsCount: 3\n replicasCount: 2\n```\n\nThe operator is actually quite well documented, and you can dive deeper by checking out some of the examples here: https://github.com/Altinity/clickhouse-operator/tree/master/docs/chi-examples if you want a more sophisticated setup", - "source_links": [], - "id": 18 - }, - { - "page_link": null, - "title": "cluster-api-operator readme", - "text": null, - "source_links": [], - "id": 19 - }, - { - "page_link": null, - "title": "cluster-api-operator-installer readme", - "text": null, - "source_links": [], - "id": 20 - }, - { - "page_link": "https://github.com/pluralsh/console", - "title": "console readme", - "text": "# Plural Console\n\n![Console](assets/public/console-lockup-dark.png)\n\nThe Plural Console is the administrative hub of the plural platform. 
It has a number of key features:\n\n* Reception of over-the-air application updates\n* Configurable, application-targeted observability\n - dashboards\n - logging\n* Common incident management, including zoom integration and slash commands\n* Interactive Runbooks\n\nWe strive to make it powerful enough to make you feel like any application you deploy using Plural has an operational profile comparable to a managed service, even without being one.\n\n## Development\n\nConsole's server side is written in elixir, and exposes a graphql api. The frontend is in react, all code lives in this single repo and common development tasks can be done using the Makefile at the root of the repo.\n\n\n### Developing Web\nTo begin developing the web app, install npm & yarn, then run:\n\n```sh\ncd assets && yarn install && cd -\nmake web\n```\n\n### Developing Server\nTo make changes to the server codebase, you'll want to install elixir on your machine. For mac desktops, we do this via asdf, which can be done simply at the root of the repo like so:\n\n```sh\nasdf install\n```\n\nOnce elixir is available, all server dependencies are managed via docker-compose, and tests can be run via `mix`, like so:\n\n```sh\nmake testup\nmix local.hex\nmix deps.get\nmix test\n```\n\n### Troubleshooting\n#### Installing Erlang \nIf `asdf install` fails with `cannot find required auxiliary files: install-sh config.guess config.sub` then run:\n\n```sh\nbrew install autoconf@2.69 && \\\nbrew link --overwrite autoconf@2.69 && \\\nautoconf -V\n```\n\nFor Mac Machines, if unable to download Erlang via `asdf` then run:\n\n```sh\nbrew install erlang@23\ncp -r /opt/homebrew/opt/erlang@23/lib/erlang ~/.asdf/installs/erlang/23.1.5\nasdf reshim erlang 23.1.5\n```", - "source_links": [], - "id": 21 - }, - { - "page_link": null, - "title": "counterstrike readme", - "text": null, - "source_links": [], - "id": 22 - }, - { - "page_link": "https://github.com/crossplane/crossplane", - "title": "crossplane readme", - "text": "![CI](https://github.com/crossplane/crossplane/workflows/CI/badge.svg) [![GitHub release](https://img.shields.io/github/release/crossplane/crossplane/all.svg?style=flat-square)](https://github.com/crossplane/crossplane/releases) [![Docker Pulls](https://img.shields.io/docker/pulls/crossplane/crossplane.svg)](https://hub.docker.com/r/crossplane/crossplane) [![Go Report Card](https://goreportcard.com/badge/github.com/crossplane/crossplane)](https://goreportcard.com/report/github.com/crossplane/crossplane) [![Slack](https://slack.crossplane.io/badge.svg)](https://slack.crossplane.io) [![Twitter Follow](https://img.shields.io/twitter/follow/crossplane_io.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=crossplane_io&user_id=788180534543339520)\n\n![Crossplane](docs/media/banner.png)\n\n\nCrossplane is a framework for building cloud native control planes without\nneeding to write code. It has a highly extensible backend that enables you to\nbuild a control plane that can orchestrate applications and infrastructure no\nmatter where they run, and a highly configurable frontend that puts you in\ncontrol of the schema of the declarative API it offers.\n\nCrossplane is a [Cloud Native Compute Foundation][cncf] project.\n\n## Releases\n\nCurrently maintained releases, as well as the next upcoming release are listed\nbelow. 
For more information take a look at the Crossplane [release cycle\ndocumentation].\n\n| Release | Release Date | EOL |\n|:-------:|:------------:|:-------------:|\n| v1.7 | Mar 22, 2022 | Sept 2022 |\n| v1.8 | May 17, 2022 | Nov 2022 |\n| v1.9 | Jul 14, 2022 | Jan 2023 |\n\nYou can subscribe to the [community calendar] to track all release dates, and\nfind the most recent releases on the [releases] page.\n\n## Get Involved\n\nCrossplane is a community driven project; we welcome your contribution. To file\na bug, suggest an improvement, or request a new feature please open an [issue\nagainst Crossplane] or the relevant provider. Refer to our [contributing guide]\nfor more information on how you can help.\n\n* Discuss Crossplane on [Slack] or our [developer mailing list].\n* Follow us on [Twitter], or contact us via [Email].\n* Join our regular community meetings.\n* Provide feedback on our [roadmap].\n\nThe Crossplane community meeting takes place every other [Thursday at 10:00am\nPacific Time][community meeting time]. Anyone who wants to discuss the direction\nof the project, design and implementation reviews, or raise general questions\nwith the broader community is encouraged to join.\n\n* Meeting link: \n* [Current agenda and past meeting notes]\n* [Past meeting recordings]\n* [Community Calendar][community calendar]\n\n## License\n\nCrossplane is under the Apache 2.0 license.\n\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fcrossplane%2Fcrossplane.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fcrossplane%2Fcrossplane?ref=badge_large)\n\n\n\n[Crossplane]: https://crossplane.io\n[release cycle documentation]: https://crossplane.io/docs/master/reference/release-cycle.html\n[install]: https://crossplane.io/docs/latest\n[Slack]: https://slack.crossplane.io\n[developer mailing list]: https://groups.google.com/forum/#!forum/crossplane-dev\n[Twitter]: https://twitter.com/crossplane_io\n[Email]: mailto:info@crossplane.io\n[issue against Crossplane]: https://github.com/crossplane/crossplane/issues\n[contributing guide]: CONTRIBUTING.md\n[community meeting time]: https://www.thetimezoneconverter.com/?t=10:00&tz=PT%20%28Pacific%20Time%29\n[Current agenda and past meeting notes]: https://docs.google.com/document/d/1q_sp2jLQsDEOX7Yug6TPOv7Fwrys6EwcF5Itxjkno7Y/edit?usp=sharing\n[Past meeting recordings]: https://www.youtube.com/playlist?list=PL510POnNVaaYYYDSICFSNWFqNbx1EMr-M\n[roadmap]: https://github.com/orgs/crossplane/projects/12\n[cncf]: https://www.cncf.io/\n[community calendar]: https://calendar.google.com/calendar/embed?src=c_2cdn0hs9e2m05rrv1233cjoj1k%40group.calendar.google.com\n[releases]: https://github.com/crossplane/crossplane/releases", - "source_links": [], - "id": 23 - }, - { - "page_link": null, - "title": "csgo readme", - "text": null, - "source_links": [], - "id": 24 - }, - { - "page_link": "https://github.com/cube-js/cube", - "title": "cube readme", - "text": "

\n\n[Website](https://cube.dev?ref=github-readme) \u2022 [Getting Started](https://cube.dev/docs/getting-started?ref=github-readme) \u2022 [Docs](https://cube.dev/docs?ref=github-readme) \u2022 [Examples](https://cube.dev/docs/examples?ref=github-readme) \u2022 [Blog](https://cube.dev/blog?ref=github-readme) \u2022 [Slack](https://slack.cube.dev?ref=github-readme) \u2022 [Twitter](https://twitter.com/the_cube_dev)\n\n[![npm version](https://badge.fury.io/js/%40cubejs-backend%2Fserver.svg)](https://badge.fury.io/js/%40cubejs-backend%2Fserver)\n[![GitHub Actions](https://github.com/cube-js/cube/workflows/Build/badge.svg)](https://github.com/cube-js/cube/actions?query=workflow%3ABuild+branch%3Amaster)\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fcube-js%2Fcube.js.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fcube-js%2Fcube.js?ref=badge_shield)\n\n__Cube is the semantic layer for building data applications.__ It helps data engineers and application developers access data from modern data stores, organize it into consistent definitions, and deliver it to every application.\n\n\n\n

\nLearn more about connecting Cube to data sources and analytics & visualization tools.\n
\n\nCube was designed to work with all SQL-enabled data sources, including cloud data warehouses like Snowflake or Google BigQuery, query engines like Presto or Amazon Athena, and application databases like Postgres. Cube has a built-in relational caching engine to provide sub-second latency and high concurrency for API requests.\n\nFor more details, see the [introduction](https://cube.dev/docs/cubejs-introduction?ref=github-readme) page in our documentation. \n\n## Why Cube?\n\nIf you are building a data application\u2014such as a business intelligence tool or a customer-facing analytics feature\u2014you\u2019ll probably face the following problems:\n\n1. __SQL code organization.__ Sooner or later, modeling even a dozen metrics with a dozen dimensions using pure SQL queries becomes a maintenance nightmare, which leads to building a modeling framework.\n2. __Performance.__ Most of the time and effort in modern analytics software development is spent providing adequate time to insight. In a world where every company\u2019s data is big data, writing just SQL queries to get insight isn\u2019t enough anymore.\n3. __Access Control.__ It is important to secure and govern access to data for all downstream data consuming applications.\n\nCube has the necessary infrastructure and features to implement efficient data modeling, access control, and performance optimizations so that every application\u2014like embedded analytics, dashboarding and reporting tools, data notebooks, and other tools\u2014can access consistent data via REST, SQL, and GraphQL APIs.\n\n![](https://raw.githubusercontent.com/cube-js/cube.js/master/docs/content/old-was-vs-cubejs-way.png)\n\n## Getting Started \ud83d\ude80\n\n### Cube Cloud\n\n[Cube Cloud](https://cube.dev/cloud?ref=github-readme) is the fastest way to get started with Cube. It provides managed infrastructure as well as an instant and free access for development projects and proofs of concept.\n\n\"Get\n\nFor a step-by-step guide on Cube Cloud, [see the docs](https://cube.dev/docs/getting-started/cloud/overview?ref=github-readme).\n\n### Docker\n\nAlternatively, you can get started with Cube locally or self-host it with [Docker](https://www.docker.com/).\n\nOnce Docker is installed, in a new folder for your project, run the following command:\n\n```bash\ndocker run -p 4000:4000 \\\n -p 15432:15432 \\\n -v ${PWD}:/cube/conf \\\n -e CUBEJS_DEV_MODE=true \\\n cubejs/cube\n```\n\nThen, open http://localhost:4000 in your browser to continue setup.\n\nFor a step-by-step guide on Docker, [see the docs](https://cube.dev/docs/getting-started-docker?ref=github-readme).\n\n## Resources\n\n- [Documentation](https://cube.dev/docs?ref=github-readme)\n- [Getting Started](https://cube.dev/docs/getting-started?ref=github-readme)\n- [Examples & Tutorials](https://cube.dev/docs/examples?ref=github-readme)\n- [Architecture](https://cube.dev/docs/cubejs-introduction?ref=github-readme#architecture)\n\n## Community\n\nIf you have any questions or need help - [please join our Slack community](https://slack.cube.dev?ref=github-readme) of amazing developers and data engineers.\n\nYou are also welcome to join our **monthly community calls** where we discuss community news, Cube Dev team's plans, backlogs, use cases, etc. If you miss the call, the recordings will also be available after the meeting. \n* When: Second Wednesday of each month at [9am Pacific Time](https://www.thetimezoneconverter.com/?t=09:00&tz=PT%20%28Pacific%20Time%29). 
\n* Meeting link: https://us02web.zoom.us/j/86717042169?pwd=VlBEd2VVK01DNDVVbU1EUXd5ajhsdz09\n* [Meeting page](https://cube.dev/community-call/). \n* Recordings will be posted on the [Community Call Playlist](https://www.youtube.com/playlist?list=PLtdXl_QTQjpb1dHZCM09qKTsgvgqjSvc9). \n\n### Our quarterly roadmap\n\nWe publish our open source roadmap every quarter and discuss them during our [monthly community calls](https://cube.dev/community-call/). You can find our roadmap under [projects in our Cube.js repository](https://github.com/cube-js/cube/projects?query=is%3Aopen+sort%3Aupdated-desc). \n\n### Contributing\n\nThere are many ways you can contribute to Cube! Here are a few possibilities:\n\n* Star this repo and follow us on [Twitter](https://twitter.com/the_cube_dev).\n* Add Cube to your stack on [Stackshare](https://stackshare.io/cube-js).\n* Upvote issues with \ud83d\udc4d reaction so we know what's the demand for particular issue to prioritize it within road map.\n* Create issues every time you feel something is missing or goes wrong.\n* Ask questions on [Stack Overflow with cube.js tag](https://stackoverflow.com/questions/tagged/cube.js) if others can have these questions as well.\n* Provide pull requests for all open issues and especially for those with [help wanted](https://github.com/cube-js/cube/issues?q=is%3Aissue+is%3Aopen+label%3A\"help+wanted\") and [good first issue](https://github.com/cube-js/cube/issues?q=is%3Aissue+is%3Aopen+label%3A\"good+first+issue\") labels.\n\nAll sort of contributions are **welcome and extremely helpful** \ud83d\ude4c Please refer to [the contribution guide](https://github.com/cube-js/cube/blob/master/CONTRIBUTING.md) for more information.\n\n## License\n\nCube Client is [MIT licensed](./packages/cubejs-client-core/LICENSE).\n\nCube Backend is [Apache 2.0 licensed](./packages/cubejs-server/LICENSE).\n\n\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fcube-js%2Fcube.js.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fcube-js%2Fcube.js?ref=badge_large)\n", - "source_links": [], - "id": 25 - }, - { - "page_link": "add-your-datasources.md", - "title": "add-your-datasources", - "text": "# Add Your Cube Datasources\n\nCube allows you to connect to multiple datasources. To configure them with Plural, you'll need to update the `cube/helm/cube/values.yaml` file.\nAll supported datasources could be found [here](https://cube.dev/docs/config/databases).\n\n\nExample of a postgres datasource\n```yaml\ncube:\n cube:\n datasources:\n default: # one default datasource is required\n type: postgres\n host: \n port: \"5432\"\n name: postgres\n user: postgres\n pass: \n postgres_2: # you can define the name you want\n type: postgres\n host: \n port: \"5432\"\n name: postgres\n user: postgres\n pass: \n```\n\nLooking at a specific datasource? Check next parts.\n\n### Datasources configuration\n\n| Name | Description | Value |\n| ------------- | ------------------------------------------------------------------------ | -------------- |\n| `datasources` | map of named datasources. 
The first datasource has to be named \"default\" | { default: {}} |\n\n### Common datasource parameters\n\n| Name | Description | Value |\n| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------- | ------- |\n| `datasources..type` | A database type supported by Cube.js | |\n| `datasources..url` | The URL for a database | |\n| `datasources..host` | The host URL for a database | |\n| `datasources..port` | The port for the database connection | |\n| `datasources..schema` | The schema within the database to connect to | |\n| `datasources..name` | The name of the database to connect to | |\n| `datasources..user` | The username used to connect to the database | |\n| `datasources..pass` | The password used to connect to the database | |\n| `datasources..passFromSecret.name` | The password used to connect to the database (using secret) | |\n| `datasources..passFromSecret.key` | The password used to connect to the database (using secret) | |\n| `datasources..domain` | A domain name within the database to connect to | |\n| `datasources..socketPath` | The path to a Unix socket for a MySQL database | |\n| `datasources..catalog` | The catalog within the database to connect to | |\n| `datasources..maxPool` | The maximum number of connections to keep active in the database connection pool | |\n| `datasources..queryTimeout` | The timeout value for any queries made to the database by Cube | |\n| `datasources..export.name` | The name of a bucket in cloud storage | |\n| `datasources..export.type` | The cloud provider where the bucket is hosted (gcs, s3) | |\n| `datasources..export.gcs.credentials` | Base64 encoded JSON key file for connecting to Google Cloud | |\n| `datasources..export.gcs.credentialsFromSecret.name` | Base64 encoded JSON key file for connecting to Google Cloud (using secret) | |\n| `datasources..export.gcs.credentialsFromSecret.key` | Base64 encoded JSON key file for connecting to Google Cloud (using secret) | |\n| `datasources..export.aws.key` | The AWS Access Key ID to use for the export bucket | |\n| `datasources..export.aws.secret` | The AWS Secret Access Key to use for the export bucket | |\n| `datasources..export.aws.secretFromSecret.name` | The AWS Secret Access Key to use for the export bucket (using secret) | |\n| `datasources..export.aws.secretFromSecret.key` | The AWS Secret Access Key to use for the export bucket (using secret) | |\n| `datasources..export.aws.region` | The AWS region of the export bucket | |\n| `datasources..export.redshift.arn` | An ARN of an AWS IAM role with permission to write to the configured bucket (see export.name) | |\n| `datasources..ssl.enabled` | If true, enables SSL encryption for database connections from Cube.js | `false` |\n| `datasources..ssl.rejectUnAuthorized` | If true, verifies the CA chain with the system's built-in CA chain | |\n| `datasources..ssl.ca` | The contents of a CA bundle in PEM format, or a path to one | |\n| `datasources..ssl.cert` | The contents of an SSL certificate in PEM format, or a path to one | |\n| `datasources..ssl.key` | The contents of a private key in PEM format, or a path to one | |\n| `datasources..ssl.ciphers` | The ciphers used by the SSL certificate | |\n| `datasources..ssl.serverName` | The server name for the SNI TLS extension | |\n| `datasources..ssl.passPhrase` | he passphrase used to encrypt the SSL private key | |\n\n### Athena datasource parameters\n\n| Name | Description | Value |\n| 
------------------------------------------------- | ------------------------------------------------------------------------ | ----- |\n| `datasources..athena.key` | The AWS Access Key ID to use for database connections | |\n| `datasources..athena.keyFromSecret.name` | The AWS Access Key ID to use for database connections (using secret) | |\n| `datasources..athena.keyFromSecret.key` | The AWS Access Key ID to use for database connections (using secret) | |\n| `datasources..athena.region` | The AWS region of the Cube.js deployment | |\n| `datasources..athena.s3OutputLocation` | The S3 path to store query results made by the Cube.js deployment | |\n| `datasources..athena.secret` | The AWS Secret Access Key to use for database connections | |\n| `datasources..athena.secretFromSecret.name` | The AWS Secret Access Key to use for database connections (using secret) | |\n| `datasources..athena.secretFromSecret.key` | The AWS Secret Access Key to use for database connections (using secret) | |\n| `datasources..athena.workgroup` | The name of the workgroup in which the query is being started | |\n| `datasources..athena.catalog` | The name of the catalog to use by default | |\n\n### Bigquery datasource parameters\n\n| Name | Description | Value |\n| -------------------------------------------------------- | ------------------------------------------------------------------------------- | ----- |\n| `datasources..bigquery.projectId` | The Google BigQuery project ID to connect to | |\n| `datasources..bigquery.location` | The Google BigQuery dataset location to connect to | |\n| `datasources..bigquery.credentials` | A Base64 encoded JSON key file for connecting to Google BigQuery | |\n| `datasources..bigquery.credentialsFromSecret.name` | A Base64 encoded JSON key file for connecting to Google BigQuery (using secret) | |\n| `datasources..bigquery.credentialsFromSecret.key` | A Base64 encoded JSON key file for connecting to Google BigQuery (using secret) | |\n| `datasources..bigquery.keyFile` | The path to a JSON key file for connecting to Google BigQuery | |\n\n### Databricks datasource parameters\n\n| Name | Description | Value |\n| -------------------------------------------- | ------------------------------------------------------------------------- | ----- |\n| `datasources..databricks.url` | The URL for a JDBC connection | |\n| `datasources..databricks.acceptPolicy` | Whether or not to accept the license terms for the Databricks JDBC driver | |\n| `datasources..databricks.token` | The personal access token used to authenticate the Databricks connection | |\n| `datasources..databricks.catalog` | Databricks catalog name | |\n\n### Clickhouse datasource parameters\n\n| Name | Description | Value |\n| ---------------------------------------- | ------------------------------------------------------- | ----- |\n| `datasources..clickhouse.readonly` | Whether the ClickHouse user has read-only access or not | |\n\n### Firebolt datasource parameters\n\n| Name | Description | Value |\n| ----------------------------------------- | ---------------------------------------------- | ----- |\n| `datasources..firebolt.account` | Account name | |\n| `datasources..firebolt.engineName` | Engine name to connect to | |\n| `datasources..firebolt.apiEndpoint` | Firebolt API endpoint. 
Used for authentication | |\n\n### Hive datasource parameters\n\n| Name | Description | Value |\n| --------------------------------------- | ----------------------------------------------- | ----- |\n| `datasources..hive.cdhVersion` | The version of the CDH instance for Apache Hive | |\n| `datasources..hive.thriftVersion` | The version of Thrift Server for Apache Hive | |\n| `datasources..hive.type` | The type of Apache Hive server | |\n| `datasources..hive.version` | The version of Apache Hive | |\n\n### Presto datasource parameters\n\n| Name | Description | Value |\n| ----------------------------------- | --------------------------------------- | ----- |\n| `datasources..presto.catalog` | The catalog within Presto to connect to | |\n\n### Snowflake datasource parameters\n\n| Name | Description | Value |\n| ----------------------------------------------------- | ---------------------------------------------------------------------- | ----- |\n| `datasources..snowFlake.account` | The Snowflake account ID to use when connecting to the database | |\n| `datasources..snowFlake.region` | The Snowflake region to use when connecting to the database | |\n| `datasources..snowFlake.role` | The Snowflake role to use when connecting to the database | |\n| `datasources..snowFlake.warehouse` | The Snowflake warehouse to use when connecting to the database | |\n| `datasources..snowFlake.clientSessionKeepAlive` | If true, keep the Snowflake connection alive indefinitely | |\n| `datasources..snowFlake.authenticator` | The type of authenticator to use with Snowflake. Defaults to SNOWFLAKE | |\n| `datasources..snowFlake.privateKeyPath` | The path to the private RSA key folder | |\n| `datasources..snowFlake.privateKeyPass` | The password for the private RSA key. Only required for encrypted keys | |\n\n### Trino datasource parameters\n\n| Name | Description | Value |\n| ---------------------------------- | -------------------------------------- | ----- |\n| `datasources..trino.catalog` | The catalog within Trino to connect to | |", - "source_links": [], - "id": 26 - }, - { - "page_link": "add-your-models.md", - "title": "add-your-models", - "text": "# Add Your Cube Models\n\nTo overwrite default schema, create a new folder `schemas` inside `cube/helm/cube` folder.\n\nThen you can add your `yaml` or `js` files.\n\nExample of a yaml model file looks like (More info [here](https://cube.dev/docs/schema/getting-started))\n```yaml\ncubes:\n - name: my_table\n sql_table: my_table\n data_source: default\n dimensions:\n - name: id\n sql: id\n type: string\n primary_key: true\n - name: product_id\n sql: product_id\n type: string\n measures:\n - name: count\n type: count\n```\n\nYou can add as many model as you want inside `schemas` folder.\n\nThen, you'll to create a `configmap.yaml` inside `cube/helm/cube/templates` with the following value:\n\n```yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cube-model\ndata:\n{{ (.Files.Glob \"schemas/**.yaml\").AsConfig | indent 2 }} # Note the **.yaml, adjust it if you want to use js models\n```\n\nFinally, you need to edit `cube/helm/cube/values.yaml`\n```yaml\ncube:\n cube:\n config:\n volumes:\n - name: cube-model\n configMap:\n name: cube-model\n volumeMounts:\n - name: cube-model\n readOnly: true\n mountPath: /cube/conf/schema/example.yaml\n subPath: example.yaml\n <...>\n```\n\nOnce that reconfiguration has been made, simply run: `plural build --only cube && plural deploy --commit \"feat(cube): add cube models\"` to apply the changes on your cluster.", - 
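Since the `schemas` folder can hold any number of model files, the `volumeMounts` list shown above simply gains one entry per file. A minimal sketch of the relevant part of `cube/helm/cube/values.yaml`, assuming two hypothetical models `orders.yaml` and `customers.yaml` have been added to `schemas` and picked up by the `cube-model` ConfigMap:

```yaml
cube:
  cube:
    config:
      volumes:
        - name: cube-model
          configMap:
            name: cube-model   # built from schemas/**.yaml by the configmap.yaml template
      volumeMounts:
        # one mount per model file; the file names here are examples only
        - name: cube-model
          readOnly: true
          mountPath: /cube/conf/schema/orders.yaml
          subPath: orders.yaml
        - name: cube-model
          readOnly: true
          mountPath: /cube/conf/schema/customers.yaml
          subPath: customers.yaml
```

As with the single-file example, `plural build --only cube && plural deploy` applies the change to your cluster.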
"source_links": [], - "id": 27 - }, - { - "page_link": "https://github.com/dagster-io/dagster", - "title": "dagster readme", - "text": "python_modules/dagster/README.md", - "source_links": [], - "id": 28 - }, - { - "page_link": "private-ingress.md", - "title": "private-ingress", - "text": "# Deploying on a private network\n\nTo deploy your dagster instance on a private network, the simplest solution is to use our provided private ingress class, which can be done easily by adding the following to `dagster/helm/dagster/values.yaml`:\n\n```yaml\ndagster:\n dagster:\n ingress:\n ingressClassName: internal-nginx\n```\n\n(this can also be done in the configuration tab of the plural console for your dagster app)", - "source_links": [], - "id": 29 - }, - { - "page_link": "user-code.md", - "title": "user-code", - "text": "# Add Your Own User Code Deployment\n\nDagster has a concept of user code deployments which allow you to specify multiple independent repositories of dags to register with the same dagster orchestrator. This is a great way to separate dependency trees between codebases or manage a complex data org. An example user code deployment configuration can be seen here, which can be added to `dagster/helm/dagster/values.yaml` or updated directly in the configuration tab of the console's dagster app page:\n\n```yaml\ndagster:\n dagster:\n dagster-user-deployments:\n deployments:\n - dagsterApiGrpcArgs:\n - -m\n - dags\n envSecrets:\n - name: dagster-user-secrets # if you want to add env vars from a k8s secret\n image:\n pullPolicy: Always\n repository: ghcr.io/your/dagster-code\n tag: v0.0.4\n name: dags\n port: 4000\n resources:\n requests:\n cpu: 20m\n memory: 100Mi\n imagePullSecrets:\n - name: gh-creds # additional pull credentials if you would like to use then\n```\n\nIt can be a bit tedious to manually maintain this configuration as your codebase, so we've provided the `plural upgrade` command to automate this out of CI. Here's an example github action doing just that: https://github.com/pluralsh/dagster-example/blob/main/.github/workflows/publish.yml#L49. 
The `upgrade.yaml` file it references can be seen here: https://github.com/pluralsh/dagster-example/blob/main/upgrade.yaml\n\n", - "source_links": [], - "id": 30 - }, - { - "page_link": "https://github.com/dagster-io/dagster", - "title": "dagster-agent readme", - "text": "python_modules/dagster/README.md", - "source_links": [], - "id": 31 - }, - { - "page_link": "https://github.com/pluralsh/dash-controller", - "title": "dash-controller readme", - "text": "# Dash controller\n\nDash controller is responsible to manage lifecycle of DashApplication objects.\n\n## Local Kubernets\n\nYou can spin up kubernetes cluster using kind.\nThe following script deploy also load balancer and ingress controller.\n\n```bash\n$ example/kind/run-kind.sh\n```\n\n## Installation\n\nInstall CRD: \n```bash\nkubectl create -f config/crd/bases\n```\n\nNow you can deploy the controller:\n\n```bash\nkubectl create -f resources/\n```\n\nGo to `example` directory to deploy your first dash application\n```bash\nkubectl create -f example/dash_picsum.yaml\n```\n\n\n```yaml\napiVersion: dash.plural.sh/v1alpha1\nkind: DashApplication\nmetadata:\n name: picsum\n namespace: default\nspec:\n replicas: 1\n container:\n image: \"zreigz/dash-picsum:0.1.0\"\n containerPort: 8050\n ingress:\n ingressClassName: \"nginx\"\n path: \"/picsum\"\n```\n\nThe controller will create Deployment, Service and Ingress with the DashApplication name: `picsum`\nWhen you deployed kind cluster the application will be available on this address: `http://localhost/picsum`\n", - "source_links": [], - "id": 32 - }, - { - "page_link": "https://github.com/DataDog/helm-charts", - "title": "datadog readme", - "text": "# Datadog Helm Charts\n\n[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/datadog)](https://artifacthub.io/packages/search?repo=datadog) \n\nOfficial Helm charts for Datadog products. Currently supported:\n- [Datadog Agents](charts/datadog/README.md) (datadog/datadog)\n\n## How to use Datadog Helm repository\n\nYou need to add this repository to your Helm repositories:\n\n```\nhelm repo add datadog https://helm.datadoghq.com\nhelm repo update\n```\n", - "source_links": [], - "id": 33 - }, - { - "page_link": "https://github.com/datahub-project/datahub", - "title": "datahub readme", - "text": "\n

\n\"DataHub\"\n

\n\n\n# DataHub: The Metadata Platform for the Modern Data Stack\n## Built with \u2764\ufe0f by [Acryl Data](https://acryldata.io) and [LinkedIn](https://engineering.linkedin.com)\n[![Version](https://img.shields.io/github/v/release/datahub-project/datahub?include_prereleases)](https://github.com/datahub-project/datahub/releases/latest)\n[![PyPI version](https://badge.fury.io/py/acryl-datahub.svg)](https://badge.fury.io/py/acryl-datahub)\n[![build & test](https://github.com/datahub-project/datahub/workflows/build%20&%20test/badge.svg?branch=master&event=push)](https://github.com/datahub-project/datahub/actions?query=workflow%3A%22build+%26+test%22+branch%3Amaster+event%3Apush)\n[![Docker Pulls](https://img.shields.io/docker/pulls/linkedin/datahub-gms.svg)](https://hub.docker.com/r/linkedin/datahub-gms)\n[![Slack](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://slack.datahubproject.io)\n[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/datahub-project/datahub/blob/master/docs/CONTRIBUTING.md)\n[![GitHub commit activity](https://img.shields.io/github/commit-activity/m/datahub-project/datahub)](https://github.com/datahub-project/datahub/pulls?q=is%3Apr)\n[![License](https://img.shields.io/github/license/datahub-project/datahub)](https://github.com/datahub-project/datahub/blob/master/LICENSE)\n[![YouTube](https://img.shields.io/youtube/channel/subscribers/UC3qFQC5IiwR5fvWEqi_tJ5w?style=social)](https://www.youtube.com/channel/UC3qFQC5IiwR5fvWEqi_tJ5w)\n[![Medium](https://img.shields.io/badge/Medium-12100E?style=for-the-badge&logo=medium&logoColor=white)](https://medium.com/datahub-project)\n[![Follow](https://img.shields.io/twitter/follow/datahubproject?label=Follow&style=social)](https://twitter.com/datahubproject)\n### \ud83c\udfe0 Hosted DataHub Docs (Courtesy of Acryl Data): [datahubproject.io](https://datahubproject.io/docs)\n\n---\n\n[Quickstart](https://datahubproject.io/docs/quickstart) |\n[Features](https://datahubproject.io/docs/features) |\n[Roadmap](https://feature-requests.datahubproject.io/roadmap) |\n[Adoption](#adoption) |\n[Demo](https://datahubproject.io/docs/demo) |\n[Town Hall](https://datahubproject.io/docs/townhalls)\n\n---\n> \ud83d\udce3\u2002DataHub Town Hall is the 4th Thursday at 9am US PT of every month - [add it to your calendar!](https://rsvp.datahubproject.io/)\n>\n> - Town-hall Zoom link: [zoom.datahubproject.io](https://zoom.datahubproject.io)\n> - [Meeting details](docs/townhalls.md) & [past recordings](docs/townhall-history.md)\n\n> \u2728\u2002DataHub Community Highlights:\n>\n> - Read our Monthly Project Updates [here](https://blog.datahubproject.io/tagged/project-updates).\n> - Bringing The Power Of The DataHub Real-Time Metadata Graph To Everyone At Acryl Data: [Data Engineering Podcast](https://www.dataengineeringpodcast.com/acryl-data-datahub-metadata-graph-episode-230/)\n> - Check out our most-read blog post, [DataHub: Popular Metadata Architectures Explained](https://engineering.linkedin.com/blog/2020/datahub-popular-metadata-architectures-explained) @ LinkedIn Engineering Blog.\n> - Join us on [Slack](docs/slack.md)! Ask questions and keep up with the latest announcements.\n\n## Introduction\n\nDataHub is an open-source metadata platform for the modern data stack. Read about the architectures of different metadata systems and why DataHub excels [here](https://engineering.linkedin.com/blog/2020/datahub-popular-metadata-architectures-explained). 
Also read our\n[LinkedIn Engineering blog post](https://engineering.linkedin.com/blog/2019/data-hub), check out our [Strata presentation](https://speakerdeck.com/shirshanka/the-evolution-of-metadata-linkedins-journey-strata-nyc-2019) and watch our [Crunch Conference Talk](https://www.youtube.com/watch?v=OB-O0Y6OYDE). You should also visit [DataHub Architecture](docs/architecture/architecture.md) to get a better understanding of how DataHub is implemented.\n\n## Features & Roadmap\n\nCheck out DataHub's [Features](docs/features.md) & [Roadmap](https://feature-requests.datahubproject.io/roadmap).\n\n## Demo and Screenshots\n\nThere's a [hosted demo environment](https://datahubproject.io/docs/demo) courtesy of [Acryl Data](https://acryldata.io) where you can explore DataHub without installing it locally\n\n## Quickstart\n\nPlease follow the [DataHub Quickstart Guide](https://datahubproject.io/docs/quickstart) to get a copy of DataHub up & running locally using [Docker](https://docker.com). As the guide assumes some basic knowledge of Docker, we'd recommend you to go through the \"Hello World\" example of [A Docker Tutorial for Beginners](https://docker-curriculum.com) if Docker is completely foreign to you.\n\n## Development\n\nIf you're looking to build & modify datahub please take a look at our [Development Guide](https://datahubproject.io/docs/developers).\n\n[![DataHub Demo GIF](docs/imgs/entity.png)](https://datahubproject.io/docs/demo)\n\n## Source Code and Repositories\n\n- [datahub-project/datahub](https://github.com/datahub-project/datahub): This repository contains the complete source code for DataHub's metadata model, metadata services, integration connectors and the web application.\n- [acryldata/datahub-actions](https://github.com/acryldata/datahub-actions): DataHub Actions is a framework for responding to changes to your DataHub Metadata Graph in real time.\n- [acryldata/datahub-helm](https://github.com/acryldata/datahub-helm): Repository of helm charts for deploying DataHub on a Kubernetes cluster\n- [acryldata/meta-world](https://github.com/acryldata/meta-world): A repository to store recipes, custom sources, transformations and other things to make your DataHub experience magical\n\n## Releases\n\nSee [Releases](https://github.com/datahub-project/datahub/releases) page for more details. We follow the [SemVer Specification](https://semver.org) when versioning the releases and adopt the [Keep a Changelog convention](https://keepachangelog.com/) for the changelog format.\n\n## Contributing\n\nWe welcome contributions from the community. Please refer to our [Contributing Guidelines](docs/CONTRIBUTING.md) for more details. We also have a [contrib](contrib) directory for incubating experimental features.\n\n## Community\n\nJoin our [Slack workspace](https://slack.datahubproject.io) for discussions and important announcements. You can also find out more about our upcoming [town hall meetings](docs/townhalls.md) and view past recordings.\n\n## Adoption\n\nHere are the companies that have officially adopted DataHub. 
Please feel free to add yours to the list if we missed it.\n\n- [ABLY](https://ably.team/)\n- [Adevinta](https://www.adevinta.com/)\n- [Banksalad](https://www.banksalad.com)\n- [Cabify](https://cabify.tech/)\n- [DefinedCrowd](http://www.definedcrowd.com)\n- [DFDS](https://www.dfds.com/)\n- [Expedia Group](http://expedia.com)\n- [Experius](https://www.experius.nl)\n- [Geotab](https://www.geotab.com)\n- [Grofers](https://grofers.com)\n- [Haibo Technology](https://www.botech.com.cn)\n- [hipages](https://hipages.com.au/)\n- [IOMED](https://iomed.health)\n- [Klarna](https://www.klarna.com)\n- [LinkedIn](http://linkedin.com)\n- [Moloco](https://www.moloco.com/en)\n- [Peloton](https://www.onepeloton.com)\n- [Saxo Bank](https://www.home.saxo)\n- [Stash](https://www.stash.com)\n- [Shanghai HuaRui Bank](https://www.shrbank.com)\n- [ThoughtWorks](https://www.thoughtworks.com)\n- [TypeForm](http://typeform.com)\n- [Uphold](https://uphold.com)\n- [Viasat](https://viasat.com)\n- [Wolt](https://wolt.com)\n- [Zynga](https://www.zynga.com)\n\n\n## Select Articles & Talks\n\n- [DataHub Blog](https://blog.datahubproject.io/)\n- [DataHub YouTube Channel](https://www.youtube.com/channel/UC3qFQC5IiwR5fvWEqi_tJ5w)\n- [Optum: Data Mesh via DataHub](https://optum.github.io/blog/2022/03/23/data-mesh-via-datahub/)\n- [Saxo Bank: Enabling Data Discovery in Data Mesh](https://medium.com/datahub-project/enabling-data-discovery-in-a-data-mesh-the-saxo-journey-451b06969c8f)\n- [Bringing The Power Of The DataHub Real-Time Metadata Graph To Everyone At Acryl Data](https://www.dataengineeringpodcast.com/acryl-data-datahub-metadata-graph-episode-230/)\n- [DataHub: Popular Metadata Architectures Explained](https://engineering.linkedin.com/blog/2020/datahub-popular-metadata-architectures-explained)\n- [Driving DataOps Culture with LinkedIn DataHub](https://www.youtube.com/watch?v=ccsIKK9nVxk) @ [DataOps Unleashed 2021](https://dataopsunleashed.com/#shirshanka-session)\n- [The evolution of metadata: LinkedIn\u2019s story](https://speakerdeck.com/shirshanka/the-evolution-of-metadata-linkedins-journey-strata-nyc-2019) @ [Strata Data Conference 2019](https://conferences.oreilly.com/strata/strata-ny-2019.html)\n- [Journey of metadata at LinkedIn](https://www.youtube.com/watch?v=OB-O0Y6OYDE) @ [Crunch Data Conference 2019](https://crunchconf.com/2019)\n- [DataHub Journey with Expedia Group](https://www.youtube.com/watch?v=ajcRdB22s5o)\n- [Data Discoverability at SpotHero](https://www.slideshare.net/MaggieHays/data-discoverability-at-spothero)\n- [Data Catalogue \u2014 Knowing your data](https://medium.com/albert-franzi/data-catalogue-knowing-your-data-15f7d0724900)\n- [DataHub: A Generalized Metadata Search & Discovery Tool](https://engineering.linkedin.com/blog/2019/data-hub)\n- [Open sourcing DataHub: LinkedIn\u2019s metadata search and discovery platform](https://engineering.linkedin.com/blog/2020/open-sourcing-datahub--linkedins-metadata-search-and-discovery-p)\n- [Emerging Architectures for Modern Data Infrastructure](https://future.com/emerging-architectures-for-modern-data-infrastructure-2020/)\n\nSee the full list [here](docs/links.md).\n\n## License\n\n[Apache License 2.0](./LICENSE).\n", - "source_links": [], - "id": 34 - }, - { - "page_link": null, - "title": "dex readme", - "text": null, - "source_links": [], - "id": 35 - }, - { - "page_link": "https://github.com/directus/directus", - "title": "directus readme", - "text": null, - "source_links": [], - "id": 36 - }, - { - "page_link": "bring-your-own-postgres.md", - 
"title": "bring-your-own-postgres", - "text": "# Bring Your Own Postgres DB\n\nSome users might prefer to use and external or managed postgres instance rather than an on-cluster one. In that case, only a small reconfiguration is required, in `directus/helm/directus/values.yaml` overlay the following:\n\n```yaml\ndirectus:\n postgres:\n enabled: false # if you'd like to remove the existing db\n dsn: 'postgresql://:@:/'\n```\n\nYou can use any valid postgres connection string, and might need to tweak sslmode and so forth to get the exact correct value. This file will be encrypted, so no worries about secret exposure as well.\n\nOnce that reconfiguration has been made, simply run: `plural build --only directus && plural deploy --commit \"redeploy directus\"` to apply the changes on your cluster.", - "source_links": [], - "id": 37 - }, - { - "page_link": "https://github.com/elastic/cloud-on-k8s", - "title": "elasticsearch readme", - "text": "\n[![Build status](https://badge.buildkite.com/8fe262ce6fc1da017fc91c35465c1fe0addbc94c38afc9f04b.svg?branch=main)](https://buildkite.com/elastic/cloud-on-k8s-operator)\n[![GitHub release](https://img.shields.io/github/v/release/elastic/cloud-on-k8s.svg)](https://github.com/elastic/cloud-on-k8s/releases/latest)\n\n# Elastic Cloud on Kubernetes (ECK)\n\nElastic Cloud on Kubernetes automates the deployment, provisioning, management, and orchestration of Elasticsearch, Kibana, APM Server, Enterprise Search, Beats, Elastic Agent, Elastic Maps Server, and Logstash on Kubernetes based on the operator pattern.\n\nCurrent features:\n\n* Elasticsearch, Kibana, APM Server, Enterprise Search, and Beats deployments\n* TLS Certificates management\n* Safe Elasticsearch cluster configuration & topology changes\n* Persistent volumes usage\n* Custom node configuration and attributes\n* Secure settings keystore updates\n\nSupported versions:\n\n* Kubernetes 1.24-1.27\n* OpenShift 4.9-4.13\n* Elasticsearch, Kibana, APM Server: 6.8+, 7.1+, 8+\n* Enterprise Search: 7.7+, 8+\n* Beats: 7.0+, 8+\n* Elastic Agent: 7.10+ (standalone), 7.14+, 8+ (Fleet)\n* Elastic Maps Server: 7.11+, 8+\n* Logstash 8.7+\n\nCheck the [Quickstart](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html) to deploy your first cluster with ECK.\n\nIf you want to contribute to the project, check our [contributing guide](CONTRIBUTING.md) and see [how to setup a local development environment](dev-setup.md).\n\nFor general questions, please see the Elastic [forums](https://discuss.elastic.co/c/eck).\n", - "source_links": [], - "id": 38 - }, - { - "page_link": "external-ingress.md", - "title": "external-ingress", - "text": "# Set Up External Ingress\n\nIf you'd like to access your elasticsearch cluster externally, you can do that with a relatively simply helm reconfiguration. 
At `elasticsearch/helm/elasticsearch/values.yaml` add:\n\n```yaml\nelasticsearch:\n ingressElastic:\n enabled: true\n hostname: elasticsearch.CLUSTER_SUBDOMAIN\n```\n\n`CLUSTER_SUBDOMAIN` should be the same subdomain you use for all other apps in the cluster, if it were given something differently, externaldns and cert isssuance will fail and your install will not be accessible.", - "source_links": [], - "id": 39 - }, - { - "page_link": "https://github.com/etcd-io/etcd", - "title": "etcd readme", - "text": "# etcd\n\n[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/etcd?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/etcd)\n[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/main/graph/badge.svg)](https://codecov.io/gh/etcd-io/etcd)\n[![Tests](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml)\n[![codeql-analysis](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml)\n[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)\n[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/etcd)\n[![Releases](https://img.shields.io/github/release/etcd-io/etcd/all.svg?style=flat-square)](https://github.com/etcd-io/etcd/releases)\n[![LICENSE](https://img.shields.io/github/license/etcd-io/etcd.svg?style=flat-square)](https://github.com/etcd-io/etcd/blob/main/LICENSE)\n[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd/badge)](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd)\n\n**Note**: The `main` branch may be in an *unstable or even broken state* during development. For stable versions, see [releases][github-release].\n\n![etcd Logo](logos/etcd-horizontal-color.svg)\n\netcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being:\n\n* *Simple*: well-defined, user-facing API (gRPC)\n* *Secure*: automatic TLS with optional client cert authentication\n* *Fast*: benchmarked 10,000 writes/sec\n* *Reliable*: properly distributed using Raft\n\netcd is written in Go and uses the [Raft][] consensus algorithm to manage a highly-available replicated log.\n\netcd is used [in production by many companies](./ADOPTERS.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [locksmith][], [vulcand][], [Doorman][], and many others. Reliability is further ensured by [**rigorous testing**](https://github.com/etcd-io/etcd/tree/main/tests/functional).\n\nSee [etcdctl][etcdctl] for a simple command line client.\n\n[raft]: https://raft.github.io/\n[k8s]: http://kubernetes.io/\n[doorman]: https://github.com/youtube/doorman\n[locksmith]: https://github.com/coreos/locksmith\n[vulcand]: https://github.com/vulcand/vulcand\n[etcdctl]: https://github.com/etcd-io/etcd/tree/main/etcdctl\n\n## Maintainers\n\n[MAINTAINERS](MAINTAINERS) strive to shape an inclusive open source project culture where users are heard and contributors feel respected and empowered. MAINTAINERS maintain productive relationships across different companies and disciplines. 
Read more about [MAINTAINERS role and responsibilities](GOVERNANCE.md#maintainers).\n\n## Getting started\n\n### Getting etcd\n\nThe easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, and Docker on the [release page][github-release].\n\nFor more installation guides, please check out [play.etcd.io](http://play.etcd.io) and [operating etcd](https://etcd.io/docs/latest/op-guide).\n\n[github-release]: https://github.com/etcd-io/etcd/releases\n[branch-management]: https://etcd.io/docs/latest/branch_management\n\n### Running etcd\n\nFirst start a single-member cluster of etcd.\n\nIf etcd is installed using the [pre-built release binaries][github-release], run it from the installation location as below:\n\n```bash\n/tmp/etcd-download-test/etcd\n```\n\nThe etcd command can be simply run as such if it is moved to the system path as below:\n\n```bash\nmv /tmp/etcd-download-test/etcd /usr/local/bin/\netcd\n```\n\nThis will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.\n\nNext, let's set a single key, and then retrieve it:\n\n```\netcdctl put mykey \"this is awesome\"\netcdctl get mykey\n```\n\netcd is now running and serving client requests. For more, please check out:\n\n- [Interactive etcd playground](http://play.etcd.io)\n- [Animated quick demo](https://etcd.io/docs/latest/demo)\n\n### etcd TCP ports\n\nThe [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.\n\n[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt\n\n### Running a local etcd cluster\n\nFirst install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.\n\nOur [Procfile script](./Procfile) will set up a local example cluster. Start it with:\n\n```bash\ngoreman start\n```\n\nThis will bring up 3 etcd members `infra1`, `infra2` and `infra3` and optionally etcd `grpc-proxy`, which runs locally and composes a cluster.\n\nEvery cluster member and proxy accepts key value reads and key value writes.\n\nFollow the steps in [Procfile.learner](./Procfile.learner) to add a learner node to the cluster. 
Start the learner node with:\n\n```bash\ngoreman -f ./Procfile.learner start\n```\n\n### Install etcd client v3\n\n```bash\ngo get go.etcd.io/etcd/client/v3\n```\n\n### Next steps\n\nNow it's time to dig into the full etcd API and other guides.\n\n- Read the full [documentation][].\n- Explore the full gRPC [API][].\n- Set up a [multi-machine cluster][clustering].\n- Learn the [config format, env variables and flags][configuration].\n- Find [language bindings and tools][integrations].\n- Use TLS to [secure an etcd cluster][security].\n- [Tune etcd][tuning].\n\n[documentation]: https://etcd.io/docs/latest\n[api]: https://etcd.io/docs/latest/learning/api\n[clustering]: https://etcd.io/docs/latest/op-guide/clustering\n[configuration]: https://etcd.io/docs/latest/op-guide/configuration\n[integrations]: https://etcd.io/docs/latest/integrations\n[security]: https://etcd.io/docs/latest/op-guide/security\n[tuning]: https://etcd.io/docs/latest/tuning\n\n## Contact\n\n- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)\n- Slack: [#etcd](https://kubernetes.slack.com/messages/C3HD8ARJ5/details/) channel on Kubernetes ([get an invite](http://slack.kubernetes.io/))\n- [Community meetings](#Community-meetings)\n\n### Community meetings\n\netcd contributors and maintainers have monthly (every four weeks) meetings at 11:00 AM (USA Pacific) on Thursday.\n\nAn initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas.\n\nMeeting recordings are uploaded to official etcd [YouTube channel].\n\nGet calendar invitation by joining [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) mailing group.\n\nJoin Hangouts Meet: [meet.google.com/umg-nrxn-qvs](https://meet.google.com/umg-nrxn-qvs)\n\nJoin by phone: +1 405-792-0633\u202c PIN: \u202a299 906\u202c#\n\n[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit\n[YouTube channel]: https://www.youtube.com/channel/UC7tUWR24I5AR9NMsG-NYBlg\n\n## Contributing\n\nSee [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.\n\n## Reporting bugs\n\nSee [reporting bugs](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md) for details about reporting any issues.\n\n## Reporting a security vulnerability\n\nSee [security disclosure and release process](security/README.md) for details on how to report a security vulnerability and how the etcd team manages it.\n\n## Issue and PR management\n\nSee [issue triage guidelines](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_issues.md) for details on how issues are managed.\n\nSee [PR management](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_prs.md) for guidelines on how pull requests are managed.\n\n## etcd Emeritus Maintainers\n\nThese emeritus maintainers dedicated a part of their career to etcd and reviewed code, triaged bugs and pushed the project forward over a substantial period of time. Their contribution is greatly appreciated.\n\n* Fanmin Shi\n* Anthony Romano\n* Brandon Philips\n* Joe Betz\n* Gyuho Lee\n* Jingyi Hu\n* Wenjia Zhang\n* Xiang Li\n* Ben Darnell\n* Tobias Grieger\n\n### License\n\netcd is under the Apache 2.0 license. 
See the [LICENSE](LICENSE) file for details.\n", - "source_links": [], - "id": 40 - }, - { - "page_link": null, - "title": "external-secrets readme", - "text": null, - "source_links": [], - "id": 41 - }, - { - "page_link": null, - "title": "filecoin readme", - "text": null, - "source_links": [], - "id": 42 - }, - { - "page_link": null, - "title": "gcp-config-connector readme", - "text": null, - "source_links": [], - "id": 43 - }, - { - "page_link": "https://github.com/TryGhost/Ghost", - "title": "ghost readme", - "text": " \n


\n \n\n

\n Ghost.org \u2022\n Forum \u2022\n Docs \u2022\n Contributing \u2022\n Twitter\n


\n

\n Love open source? We're hiring JavaScript engineers to work on Ghost full-time.\n

\n\n \n\n\"Fiercely\n\n \n\n\"Ghost(Pro)\"\n\"Ghost(Pro)\"\n\nThe easiest way to get a production instance deployed is with our official **[Ghost(Pro)](https://ghost.org/pricing/)** managed service. It takes about 2 minutes to launch a new site with worldwide CDN, backups, security and maintenance all done for you.\n\nFor most people this ends up being the best value option cause of [how much time it saves](https://ghost.org/docs/hosting/) \u2014 and 100% of revenue goes to the Ghost Foundation; funding the maintenance and further development of the project itself. So you\u2019ll be supporting open source software *and* getting a great service!\n\nIf you prefer to run on your own infrastructure, we also offer official 1-off installs and managed support and maintenance plans via **[Ghost(Valet)](https://valet.ghost.org)** - which can save a substantial amount of developer time and resources.\n\n \n\n# Quickstart install\n\nIf you want to run your own instance of Ghost, in most cases the best way is to use our **CLI tool**\n\n```\nnpm install ghost-cli -g\n```\n\n \n\nThen, if installing locally add the `local` flag to get up and running in under a minute - [Local install docs](https://ghost.org/docs/install/local/)\n\n```\nghost install local\n```\n\n \n\nor on a server run the full install, including automatic SSL setup using LetsEncrypt - [Production install docs](https://ghost.org/docs/install/ubuntu/)\n\n```\nghost install\n```\n\n \n\nCheck out our [official documentation](https://ghost.org/docs/) for more information about our [recommended hosting stack](https://ghost.org/docs/hosting/) & properly [upgrading Ghost](https://ghost.org/docs/update/), plus everything you need to develop your own Ghost [themes](https://ghost.org/docs/themes/) or work with [our API](https://ghost.org/docs/content-api/).\n\n### Contributors & advanced developers\n\nFor anyone wishing to contribute to Ghost or to hack/customize core files we recommend following our full development setup guides: [Contributor guide](https://ghost.org/docs/contributing/) \u2022 [Developer setup](https://ghost.org/docs/install/source/)\n\n \n\n# Ghost sponsors\n\nWe'd like to extend big thanks to our sponsors and partners who make Ghost possible. If you're interested in sponsoring Ghost and supporting the project, please check out our profile on [GitHub sponsors](https://github.com/sponsors/TryGhost) :heart:\n\n**[DigitalOcean](https://m.do.co/c/9ff29836d717)** \u2022 **[Fastly](https://www.fastly.com/)**\n\n \n\n# Getting help\n\nYou can find answers to a huge variety of questions, along with a large community of helpful developers over on the [Ghost forum](https://forum.ghost.org/) - replies are generally very quick. **Ghost(Pro)** customers also have access to 24/7 email support.\n\nTo stay up to date with all the latest news and product updates, make sure you [subscribe to our blog](https://ghost.org/blog/) \u2014 or you can always follow us [on Twitter](https://twitter.com/Ghost), if you prefer your updates bite-sized and facetious. :saxophone::turtle:\n\n \n\n# Copyright & license\n\nCopyright (c) 2013-2022 Ghost Foundation - Released under the [MIT license](LICENSE). Ghost and the Ghost Logo are trademarks of Ghost Foundation Ltd. 
Please see our [trademark policy](https://ghost.org/trademark/) for info on acceptable usage.\n", - "source_links": [], - "id": 44 - }, - { - "page_link": "https://gitlab.com/gitlab-org/gitlab", - "title": "gitlab readme", - "text": "# GitLab\n\n## Canonical source\n\nThe canonical source of GitLab where all development takes place is [hosted on GitLab.com](https://gitlab.com/gitlab-org/gitlab).\n\nIf you wish to clone a copy of GitLab without proprietary code, you can use the read-only mirror of GitLab located at https://gitlab.com/gitlab-org/gitlab-foss/. However, please do not submit any issues and/or merge requests to that project.\n\n## Free trial\n\nYou can request a free trial of GitLab Ultimate [on our website](https://about.gitlab.com/free-trial/).\n\n## Open source software to collaborate on code\n\nTo see how GitLab looks please see the [features page on our website](https://about.gitlab.com/features/).\n\n- Manage Git repositories with fine grained access controls that keep your code secure\n- Perform code reviews and enhance collaboration with merge requests\n- Complete continuous integration (CI) and continuous deployment/delivery (CD) pipelines to build, test, and deploy your applications\n- Each project can also have an issue tracker, issue board, and a wiki\n- Used by more than 100,000 organizations, GitLab is the most popular solution to manage Git repositories on-premises\n- Completely free and open source (MIT Expat license)\n\n## Editions\n\nThere are three editions of GitLab:\n\n- GitLab Community Edition (CE) is available freely under the MIT Expat license.\n- GitLab Enterprise Edition (EE) includes [extra features](https://about.gitlab.com/pricing/#compare-options) that are more useful for organizations with more than 100 users. 
To use EE and get official support please [become a subscriber](https://about.gitlab.com/pricing/).\n- JiHu Edition (JH) tailored specifically for the [Chinese market](https://about.gitlab.cn/).\n\n## Licensing\n\nSee the [LICENSE](LICENSE) file for licensing information as it pertains to\nfiles in this repository.\n\n## Hiring\n\nWe are hiring developers, support people, and production engineers all the time, please see our [jobs page](https://about.gitlab.com/jobs/).\n\n## Website\n\nOn [about.gitlab.com](https://about.gitlab.com/) you can find more information about:\n\n- [Subscriptions](https://about.gitlab.com/pricing/)\n- [Consultancy](https://about.gitlab.com/consultancy/)\n- [Community](https://about.gitlab.com/community/)\n- [Hosted GitLab.com](https://about.gitlab.com/gitlab-com/) use GitLab as a free service\n- [GitLab Enterprise Edition](https://about.gitlab.com/features/#enterprise) with additional features aimed at larger organizations.\n- [GitLab CI](https://about.gitlab.com/gitlab-ci/) a continuous integration (CI) server that is easy to integrate with GitLab.\n\n## Requirements\n\nPlease see the [requirements documentation](doc/install/requirements.md) for system requirements and more information about the supported operating systems.\n\n## Installation\n\nThe recommended way to install GitLab is with the [Omnibus packages](https://about.gitlab.com/downloads/) on our package server.\nCompared to an installation from source, this is faster and less error prone.\nJust select your operating system, download the respective package (Debian or RPM) and install it using the system's package manager.\n\nThere are various other options to install GitLab, please refer to the [installation page on the GitLab website](https://about.gitlab.com/installation/) for more information.\n\n## Contributing\n\nGitLab is an open source project and we are very happy to accept community contributions. Please refer to [Contributing to GitLab page](https://about.gitlab.com/contributing/) for more details.\n\n## Install a development environment\n\nTo work on GitLab itself, we recommend setting up your development environment with [the GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit).\nIf you do not use the GitLab Development Kit you need to install and configure all the dependencies yourself, this is a lot of work and error prone.\nOne small thing you also have to do when installing it yourself is to copy the example development Puma configuration file:\n\n```shell\ncp config/puma.example.development.rb config/puma.rb\n```\n\nInstructions on how to start GitLab and how to run the tests can be found in the [getting started section of the GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit#getting-started).\n\n## Software stack\n\nGitLab is a Ruby on Rails application that runs on the following software:\n\n- Ubuntu/Debian/CentOS/RHEL/OpenSUSE\n- Ruby (MRI) 3.0.5\n- Git 2.33+\n- Redis 5.0+\n- PostgreSQL 12+\n\nFor more information please see the [architecture](https://docs.gitlab.com/ee/development/architecture.html) and [requirements](https://docs.gitlab.com/ee/install/requirements.html) documentation.\n\n## UX design\n\nPlease adhere to the [UX Guide](https://design.gitlab.com/) when creating designs and implementing code.\n\n## Third-party applications\n\nThere are a lot of [third-party applications integrating with GitLab](https://about.gitlab.com/applications/). 
These include GUI Git clients, mobile applications and API wrappers for various languages.\n\n## GitLab release cycle\n\nFor more information about the release process see the [release documentation](https://gitlab.com/gitlab-org/release-tools/blob/master/README.md).\n\n## Upgrading\n\nFor upgrading information please see our [update page](https://about.gitlab.com/update/).\n\n## Documentation\n\nAll documentation can be found on .\n\n## Getting help\n\nPlease see [Getting help for GitLab](https://about.gitlab.com/getting-help/) on our website for the many options to get help.\n\n## Why?\n\n[Read here](https://about.gitlab.com/why/)\n\n## Is it any good?\n\n[Yes](https://about.gitlab.com/is-it-any-good/)\n\n## Is it awesome?\n\n[These people](https://twitter.com/gitlab/followers) seem to like it.\n", - "source_links": [], - "id": 45 - }, - { - "page_link": null, - "title": "goldilocks readme", - "text": null, - "source_links": [], - "id": 46 - }, - { - "page_link": "https://github.com/grafana/grafana", - "title": "grafana readme", - "text": "![Grafana](docs/logo-horizontal.png)\n\nThe open-source platform for monitoring and observability\n\n[![License](https://img.shields.io/github/license/grafana/grafana)](LICENSE)\n[![Drone](https://drone.grafana.net/api/badges/grafana/grafana/status.svg)](https://drone.grafana.net/grafana/grafana)\n[![Go Report Card](https://goreportcard.com/badge/github.com/grafana/grafana)](https://goreportcard.com/report/github.com/grafana/grafana)\n\nGrafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data-driven culture:\n\n- **Visualizations:** Fast and flexible client side graphs with a multitude of options. Panel plugins offer many different ways to visualize metrics and logs.\n- **Dynamic Dashboards:** Create dynamic & reusable dashboards with template variables that appear as dropdowns at the top of the dashboard.\n- **Explore Metrics:** Explore your data through ad-hoc queries and dynamic drilldown. Split view and compare different time ranges, queries and data sources side by side.\n- **Explore Logs:** Experience the magic of switching from metrics to logs with preserved label filters. Quickly search through all your logs or streaming them live.\n- **Alerting:** Visually define alert rules for your most important metrics. Grafana will continuously evaluate and send notifications to systems like Slack, PagerDuty, VictorOps, OpsGenie.\n- **Mixed Data Sources:** Mix different data sources in the same graph! You can specify a data source on a per-query basis. This works for even custom datasources.\n\n## Get started\n\n- [Get Grafana](https://grafana.com/get)\n- [Installation guides](https://grafana.com/docs/grafana/latest/setup-grafana/installation/)\n\nUnsure if Grafana is for you? 
Watch Grafana in action on [play.grafana.org](https://play.grafana.org/)!\n\n## Documentation\n\nThe Grafana documentation is available at [grafana.com/docs](https://grafana.com/docs/).\n\n## Contributing\n\nIf you're interested in contributing to the Grafana project:\n\n- Start by reading the [Contributing guide](https://github.com/grafana/grafana/blob/HEAD/CONTRIBUTING.md).\n- Learn how to set up your local environment, in our [Developer guide](https://github.com/grafana/grafana/blob/HEAD/contribute/developer-guide.md).\n- Explore our [beginner-friendly issues](https://github.com/grafana/grafana/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22).\n- Look through our [style guide and Storybook](https://developers.grafana.com/ui/latest/index.html).\n\n## Get involved\n\n- Follow [@grafana on Twitter](https://twitter.com/grafana/).\n- Read and subscribe to the [Grafana blog](https://grafana.com/blog/).\n- If you have a specific question, check out our [discussion forums](https://community.grafana.com/).\n- For general discussions, join us on the [official Slack](https://slack.grafana.com) team.\n\n## License\n\nGrafana is distributed under [AGPL-3.0-only](LICENSE). For Apache-2.0 exceptions, see [LICENSING.md](https://github.com/grafana/grafana/blob/HEAD/LICENSING.md).\n", - "source_links": [], - "id": 47 - }, - { - "page_link": "plugins.md", - "title": "plugins", - "text": "## Adding grafana plugins to your install\n\nyou can simply add to `grafana/helm/grafana/values.yaml` or in grafana's configuration page in your plural console:\n\n```yaml\ngrafana:\n grafana:\n plugins:\n - your-plugin-name\n```", - "source_links": [], - "id": 48 - }, - { - "page_link": "https://github.com/grafana/agent", - "title": "grafana-agent readme", - "text": "

\"Grafana

\n\nGrafana Agent is a vendor-neutral, batteries-included telemetry collector with\nconfiguration inspired by [Terraform][]. It is designed to be flexible,\nperformant, and compatible with multiple ecosystems such as Prometheus and\nOpenTelemetry.\n\nGrafana Agent is based around **components**. Components are wired together to\nform programmable observability **pipelines** for telemetry collection,\nprocessing, and delivery.\n\n> **NOTE**: This page focuses mainly on \"[Flow mode][Grafana Agent Flow],\" the\n> Terraform-inspired revision of Grafana Agent.\n\nGrafana Agent can collect, transform, and send data to:\n\n* The [Prometheus][] ecosystem\n* The [OpenTelemetry][] ecosystem\n* The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Phlare][])\n\n[Terraform]: https://terraform.io\n[Grafana Agent Flow]: https://grafana.com/docs/agent/latest/flow/\n[Prometheus]: https://prometheus.io\n[OpenTelemetry]: https://opentelemetry.io\n[Loki]: https://github.com/grafana/loki\n[Grafana]: https://github.com/grafana/grafana\n[Tempo]: https://github.com/grafana/tempo\n[Mimir]: https://github.com/grafana/mimir\n[Phlare]: https://github.com/grafana/phlare\n\n## Why use Grafana Agent?\n\n* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and\n Grafana open source ecosystems.\n* **Every signal**: Collect telemetry data for metrics, logs, traces, and\n continuous profiles.\n* **Scalable**: Deploy on any number of machines to collect millions of active\n series and terabytes of logs.\n* **Battle-tested**: Grafana Agent extends the existing battle-tested code from\n the Prometheus and OpenTelemetry Collector projects.\n* **Powerful**: Write programmable pipelines with ease, and debug them using a\n [built-in UI][UI].\n* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and\n Apache to get telemetry that's immediately useful.\n\n[UI]: https://grafana.com/docs/agent/latest/flow/monitoring/debugging/#grafana-agent-flow-ui\n\n## Getting started\n\nCheck out our [documentation][] to see:\n\n* [Installation instructions][] for Grafana Agent\n* Details about [Grafana Agent Flow][]\n* Steps for [Getting started][] with Grafana Agent Flow\n* The list of Grafana Agent Flow [Components][]\n\n[documentation]: https://grafana.com/docs/agent/latest/\n[Installation instructions]: https://grafana.com/docs/agent/latest/set-up/\n[Grafana Agent Flow]: https://grafana.com/docs/agent/latest/flow/\n[Getting started]: https://grafana.com/docs/agent/latest/flow/getting_started/\n[Components]: https://grafana.com/docs/agent/latest/flow/reference/components/\n\n## Example\n\n```river\n// Discover Kubernetes pods to collect metrics from.\ndiscovery.kubernetes \"pods\" {\n role = \"pod\"\n}\n\n// Collect metrics from Kubernetes pods.\nprometheus.scrape \"default\" {\n targets = discovery.kubernetes.pods.targets\n forward_to = [prometheus.remote_write.default.receiver]\n}\n\n// Get an API key from disk.\nlocal.file \"apikey\" {\n filename = \"/var/data/my-api-key.txt\"\n is_secret = true\n}\n\n// Send metrics to a Prometheus remote_write endpoint.\nprometheus.remote_write \"default\" {\n endpoint {\n url = \"http://localhost:9009/api/prom/push\"\n\n basic_auth {\n username = \"MY_USERNAME\"\n password = local.file.apikey.content\n }\n }\n}\n```\n\nWe maintain an example [Docker Compose environment][] that can be used to\nlaunch dependencies to play with Grafana Agent locally.\n\n[Docker Compose environment]: ./example/docker-compose/\n\n## Release cadence\n\nA new 
minor release is planned every six weeks. You can use the list of\n[Milestones][] to see what maintainers are planning on working on for a given\nrelease cycle.\n\nBoth the release cadence and the items assigned to a milestone are best-effort:\nreleases may be moved forwards or backwards if needed, and items may be moved\nto a different milestone or removed entirely. The planned release dates for\nfuture minor releases do not change if one minor release is moved.\n\nPatch and security releases may be created at any time.\n\n[Milestones]: https://github.com/grafana/agent/milestones\n\n## Community\n\nTo engage with the Grafana Agent community:\n\n* Chat with us on our community Slack channel. To invite yourself to the\n Grafana Slack, visit and join the `#agent`\n channel.\n* Ask questions on the [Discussions page][].\n* [File an issue][] for bugs, issues, and feature suggestions.\n* Attend the monthly [community call][].\n\n[Discussions page]: https://github.com/grafana/agent/discussions\n[File an issue]: https://github.com/grafana/agent/issues/new\n[community call]: https://docs.google.com/document/d/1TqaZD1JPfNadZ4V81OCBPCG_TksDYGlNlGdMnTWUSpo\n\n## Contribute\n\nRefer to our [contributors guide][] to learn how to contribute.\n\n[contributors guide]: ./docs/developer/contributing.md\n", - "source_links": [], - "id": 49 - }, - { - "page_link": "https://github.com/grafana/tempo", - "title": "grafana-tempo readme", - "text": "

\"Tempo

\n

\n \"Latest\n \"License\"\n \"Docker\n \"Slack\"\n \"Community\n \"Go\n \"Blerg\n

\n\n\nGrafana Tempo is an open source, easy-to-use and high-scale distributed tracing backend. Tempo is cost-efficient, requiring only object storage to operate, and is deeply integrated with Grafana, Prometheus, and Loki. Tempo can be used with any of the open source tracing protocols, including Jaeger, Zipkin, OpenCensus, Kafka, and OpenTelemetry. It supports key/value lookup only and is designed to work in concert with logs and metrics (exemplars) for discovery.\n\nTempo is Jaeger, Zipkin, Kafka, OpenCensus and OpenTelemetry compatible. It ingests batches in any of the mentioned formats, buffers them and then writes them to Azure, GCS, S3 or local disk. As such it is robust, cheap and easy to operate!\n\n

\"Tempo

\n\n\n## Getting Started\n\n- [Documentation](https://grafana.com/docs/tempo/latest/)\n- [Deployment Examples](./example)\n - Deployment and log discovery Examples\n- [What is Distributed Tracing?](https://opentracing.io/docs/overview/what-is-tracing/)\n\n## Further Reading\n\nTo learn more about Tempo, consult the following documents & talks:\n\n- October 2020 Launch blog post: \"[Announcing Grafana Tempo, a massively scalable distributed tracing system][tempo-launch-post]\"\n- October 2020 Motivations and tradeoffs blog post: \"[Tempo: A game of trade-offs][tempo-tradeoffs-post]\"\n- October 2020 Grafana ObservabilityCON Keynote Tempo announcement: \"[Keynote: What is observability?][tempo-o11ycon-keynote]\"\n- October 2020 Grafana ObservabilityCON Tempo Deep Dive: \"[Tracing made simple with Grafana][tempo-o11ycon-deep-dive]\"\n\n[tempo-launch-post]: https://grafana.com/blog/2020/10/27/announcing-grafana-tempo-a-massively-scalable-distributed-tracing-system/\n[tempo-tradeoffs-post]: https://gouthamve.dev/tempo-a-game-of-trade-offs/\n[tempo-o11ycon-keynote]: https://grafana.com/go/observabilitycon/keynote-what-is-observability/\n[tempo-o11ycon-deep-dive]: https://grafana.com/go/observabilitycon/tracing-made-simple-with-grafana/\n\n## Getting Help\n\nIf you have any questions or feedback regarding Tempo:\n\n- Search existing thread in the Grafana Labs community forum for Tempo: [https://community.grafana.com](https://community.grafana.com/c/grafana-tempo/40)\n- Ask a question on the Tempo Slack channel. To invite yourself to the Grafana Slack, visit [https://slack.grafana.com/](https://slack.grafana.com/) and join the #tempo channel.\n- [File an issue](https://github.com/grafana/tempo/issues/new/choose) for bugs, issues and feature suggestions.\n- UI issues should be filed with [Grafana](https://github.com/grafana/grafana/issues/new/choose).\n\n## OpenTelemetry\n\nTempo's receiver layer, wire format and storage format are all based directly on [standards](https://github.com/open-telemetry/opentelemetry-proto) and [code](https://github.com/open-telemetry/opentelemetry-collector) established by [OpenTelemetry](https://opentelemetry.io/). We support open standards at Grafana!\n\nCheck out the [Integration Guides](https://grafana.com/docs/tempo/latest/guides/instrumentation/) to see examples of OpenTelemetry instrumentation with Tempo.\n\n## Other Components\n\n### tempo-vulture\ntempo-vulture is tempo's bird themed consistency checking tool. It pushes traces and queries Tempo. It metrics 404s and traces with missing spans.\n\n### tempo-cli\ntempo-cli is the place to put any utility functionality related to tempo. See [Documentation](https://grafana.com/docs/tempo/latest/operations/tempo_cli/) for more info.\n\n\n## TempoDB\n\n[TempoDB](https://github.com/grafana/tempo/tree/main/tempodb) is included in the this repository but is meant to be a stand alone key value database built on top of cloud object storage (azure/gcs/s3). It is a natively multitenant, supports a WAL and is the storage engine for Tempo.\n\n## License\n\nGrafana Tempo is distributed under [AGPL-3.0-only](LICENSE). For Apache-2.0 exceptions, see [LICENSING.md](LICENSING.md).\n", - "source_links": [], - "id": 50 - }, - { - "page_link": "https://github.com/growthbook/growthbook", - "title": "growthbook readme", - "text": "

\"GrowthBook

\n

Open Source Feature Flagging and A/B Testing

\n

\n \"Build\n \"MIT\n \"Release\"\n \"Join\n

\n\nGet up and running in 1 minute with:\n\n```sh\ngit clone https://github.com/growthbook/growthbook.git\ncd growthbook\ndocker-compose up -d\n```\n\nThen visit http://localhost:3000\n\n[![GrowthBook Screenshot](/features-screenshot.png)](https://www.growthbook.io)\n\n## Our Philosophy\n\nThe top 1% of companies spend thousands of hours building their own feature flagging and A/B testing platforms in-house.\nThe other 99% are left paying for expensive 3rd party SaaS tools or hacking together unmaintained open source libraries.\n\nWe want to give all companies the flexibility and power of a fully-featured in-house platform without needing to build it themselves.\n\n## Major Features\n\n- \ud83c\udfc1 Feature flags with advanced targeting, gradual rollouts, and experiments\n- \ud83d\udcbb SDKs for [React](https://docs.growthbook.io/lib/react), [Javascript](https://docs.growthbook.io/lib/js), [PHP](https://docs.growthbook.io/lib/php), [Ruby](https://docs.growthbook.io/lib/ruby), [Python](https://docs.growthbook.io/lib/python), [Go](https://docs.growthbook.io/lib/go), and [Kotlin (Android)](https://docs.growthbook.io/lib/kotlin) with more coming soon\n- \ud83c\udd8e Powerful A/B test analysis with support for binomial, count, duration, and revenue metrics\n- \u2744\ufe0f Use your existing data stack - BigQuery, Mixpanel, Redshift, Google Analytics, [and more](https://docs.growthbook.io/app/datasources)\n- \u2b07\ufe0f Drill down into A/B test results by browser, country, or any other custom attribute\n- \ud83e\ude90 Export reports as a Jupyter Notebook!\n- \ud83d\udcdd Document everything with screenshots and GitHub Flavored Markdown throughout\n- \ud83d\udd14 Automated email alerts when A/B tests become significant\n\n## Try GrowthBook\n\n### Managed Cloud Hosting\n\nCreate a free [GrowthBook Cloud](https://app.growthbook.io) account to get started.\n\n### Open Source\n\nThe included [docker-compose.yml](https://github.com/growthbook/growthbook/blob/main/docker-compose.yml) file contains the GrowthBook App and a MongoDB instance (for storing cached experiment results and metadata):\n\n```sh\ngit clone https://github.com/growthbook/growthbook.git\ncd growthbook\ndocker-compose up -d\n```\n\nThen visit http://localhost:3000 to view the app.\n\nCheck out the full [Self-Hosting Instructions](https://docs.growthbook.io/self-host) for more details.\n\n## Documentation and Support\n\nView the [GrowthBook Docs](https://docs.growthbook.io) for info on how to configure and use the platform.\n\nJoin [our Slack community](https://slack.growthbook.io?ref=readme-support) if you get stuck, want to chat, or are thinking of a new feature.\n\nOr email us at [hello@growthbook.io](mailto:hello@growthbook.io) if Slack isn't your thing.\n\nWe're here to help - and to make GrowthBook even better!\n\n## Contributors\n\nWe \u2764\ufe0f all contributions, big and small!\n\nRead [CONTRIBUTING.md](/CONTRIBUTING.md) for how to setup your local development environment.\n\nIf you want to, you can reach out via [Slack](https://slack.growthbook.io?ref=readme-contributing) or [email](mailto:hello@growthbook.io) and we'll set up a pair programming session to get you started.\n\n## License\n\nThis project uses the MIT license. 
The core GrowthBook app will always remain open and free, although we may add some commercial enterprise add-ons in the future.\n", - "source_links": [], - "id": 51 - }, - { - "page_link": "https://github.com/goharbor/harbor", - "title": "harbor readme", - "text": "# Harbor\n\n[![CI](https://github.com/goharbor/harbor/workflows/CI/badge.svg?branch=main&event=push)](https://github.com/goharbor/harbor/actions?query=event%3Apush+branch%3Amain+workflow%3ACI+)\n[![Coverage Status](https://codecov.io/gh/goharbor/harbor/branch/main/graph/badge.svg)](https://codecov.io/gh/goharbor/harbor)\n[![Go Report Card](https://goreportcard.com/badge/github.com/goharbor/harbor)](https://goreportcard.com/report/github.com/goharbor/harbor)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2095/badge)](https://bestpractices.coreinfrastructure.org/projects/2095)\n[![Codacy Badge](https://app.codacy.com/project/badge/Grade/792fe1755edc4d6e91f4c3469f553389)](https://www.codacy.com/gh/goharbor/harbor/dashboard?utm_source=github.com&utm_medium=referral&utm_content=goharbor/harbor&utm_campaign=Badge_Grade)\n![Code scanning - action](https://github.com/goharbor/harbor/workflows/Code%20scanning%20-%20action/badge.svg)\n[![Nightly Status](https://us-central1-eminent-nation-87317.cloudfunctions.net/harbor-nightly-result)](https://www.googleapis.com/storage/v1/b/harbor-nightly/o)\n![CONFORMANCE_TEST](https://github.com/goharbor/harbor/workflows/CONFORMANCE_TEST/badge.svg)\n[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor?ref=badge_shield)\n
\n\n|![notification](https://raw.githubusercontent.com/goharbor/website/master/docs/img/readme/bell-outline-badged.svg)Community Meeting|\n|------------------|\n|The Harbor Project holds bi-weekly community calls in two different timezones. To join the community calls or to watch previous meeting notes and recordings, please visit the [meeting schedule](https://github.com/goharbor/community/blob/master/MEETING_SCHEDULE.md).|\n\n

\n\n**Note**: The `main` branch may be in an *unstable or even broken state* during development.\nPlease use [releases](https://github.com/vmware/harbor/releases) instead of the `main` branch in order to get a stable set of binaries.\n\nHarbor is an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionality usually required by users, such as security, identity and management. Having a registry closer to the build and run environment can improve image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing.\n\nHarbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF). If you are an organization that wants to help shape the evolution of cloud native technologies, consider joining the CNCF. For details about who's involved and how Harbor plays a role, read the CNCF\n[announcement](https://www.cncf.io/blog/2018/07/31/cncf-to-host-harbor-in-the-sandbox/).\n\n## Features\n\n* **Cloud native registry**: With support for both container images and [Helm](https://helm.sh) charts, Harbor serves as a registry for cloud native environments like container runtimes and orchestration platforms.\n* **Role based access control**: Users access different repositories through 'projects' and a user can have different permissions for images or Helm charts under a project.\n* **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies using filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. This can be used to assist load balancing, achieve high availability, and facilitate multi-datacenter deployments in hybrid and multi-cloud scenarios.\n* **Vulnerability Scanning**: Harbor scans images regularly for vulnerabilities and has policy checks to prevent vulnerable images from being deployed.\n* **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor that can then be given permissions to specific projects. \n* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal. \n* **Image deletion & garbage collection**: System admins can run garbage collection jobs so that images (dangling manifests and unreferenced blobs) can be deleted and their space can be freed up periodically.\n* **Notary**: Support signing container images using Docker Content Trust (leveraging Notary) for guaranteeing authenticity and provenance. In addition, policies that prevent unsigned images from being deployed can also be activated.\n* **Graphical user portal**: Users can easily browse and search repositories and manage projects.\n* **Auditing**: All operations on the repositories are tracked through logs.\n* **RESTful API**: RESTful APIs are provided to facilitate administrative operations, and are easy to use for integration with external systems. 
An embedded Swagger UI is available for exploring and testing the API.\n* **Easy deployment**: Harbor can be deployed via Docker Compose as well as a Helm chart, and a Harbor Operator has also been added recently.\n\n## Architecture\n\nTo learn about the architecture design of Harbor, check the document [Architecture Overview of Harbor](https://github.com/goharbor/harbor/wiki/Architecture-Overview-of-Harbor).\n\n## API\n\n* Harbor RESTful API: The APIs for most administrative operations of Harbor; they can be used to integrate with Harbor programmatically.\n * Part 1: [New or changed APIs](https://editor.swagger.io/?url=https://raw.githubusercontent.com/goharbor/harbor/main/api/v2.0/swagger.yaml)\n\n## Install & Run\n\n**System requirements:**\n\n**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+.\n\nDownload binaries of a **[Harbor release](https://github.com/vmware/harbor/releases)** and follow the **[Installation & Configuration Guide](https://goharbor.io/docs/latest/install-config/)** to install Harbor.\n\nIf you want to deploy Harbor on Kubernetes, please use the **[Harbor chart](https://github.com/goharbor/harbor-helm)**.\n\nRefer to the **[documentation](https://goharbor.io/docs/)** for more details on how to use Harbor.\n\n## OCI Distribution Conformance Tests\n\nCheck the OCI distribution conformance tests [report](https://storage.googleapis.com/harbor-conformance-test/report.html) of Harbor.\n\n## Compatibility\n\nThe [compatibility list](https://goharbor.io/docs/edge/install-config/harbor-compatibility-list/) document provides compatibility information for the Harbor components.\n\n* [Replication adapters](https://goharbor.io/docs/edge/install-config/harbor-compatibility-list/#replication-adapters)\n* [OIDC adapters](https://goharbor.io/docs/edge/install-config/harbor-compatibility-list/#oidc-adapters)\n* [Scanner adapters](https://goharbor.io/docs/edge/install-config/harbor-compatibility-list/#scanner-adapters)\n\n## Community\n\n* **Twitter:** [@project_harbor](https://twitter.com/project_harbor) \n* **User Group:** Join the Harbor user email group: [harbor-users@lists.cncf.io](https://lists.cncf.io/g/harbor-users) to get updates on Harbor's news, features, and releases, or to provide suggestions and feedback. \n* **Developer Group:** Join the Harbor developer group: [harbor-dev@lists.cncf.io](https://lists.cncf.io/g/harbor-dev) for discussion on Harbor development and contribution.\n* **Slack:** Join Harbor's community to discuss and ask questions: [Cloud Native Computing Foundation](https://slack.cncf.io/), channel: [#harbor](https://cloud-native.slack.com/messages/harbor/) and [#harbor-dev](https://cloud-native.slack.com/messages/harbor-dev/)\n\n## Demos\n\n* **[Live Demo](https://demo.goharbor.io)** - A demo environment with the latest Harbor stable build installed. For additional information please refer to [this page](https://goharbor.io/docs/latest/install-config/demo-server/).\n* **[Video Demos](https://github.com/goharbor/harbor/wiki/Video-demos-for-Harbor)** - Demos for Harbor features, continuously updated.\n\n## Partners and Users\n\nFor a list of users, please refer to [ADOPTERS.md](ADOPTERS.md).\n\n## Security\n\n### Security Audit\n\nA third-party security audit was performed by Cure53 in October 2019. 
You can see the full report [here](https://goharbor.io/docs/2.0.0/security/Harbor_Security_Audit_Oct2019.pdf).\n\n### Reporting security vulnerabilities\n\nIf you've found a security related issue, a vulnerability, or a potential vulnerability in Harbor please let the [Harbor Security Team](mailto:cncf-harbor-security@lists.cncf.io) know with the details of the vulnerability. We'll send a confirmation\nemail to acknowledge your report, and we'll send an additional email when we've identified the issue\npositively or negatively.\n\nFor further details please see our complete [security release process](SECURITY.md).\n\n## License\n\nHarbor is available under the [Apache 2 license](LICENSE).\n\nThis project uses open source components which have additional licensing terms. The official docker images and licensing terms for these open source components can be found at the following locations:\n\n* Photon OS 1.0: [docker image](https://hub.docker.com/_/photon/), [license](https://github.com/vmware/photon/blob/master/COPYING)\n\n\n## Fossa Status\n\n[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor?ref=badge_large)", - "source_links": [], - "id": 52 - }, - { - "page_link": "https://github.com/hasura/graphql-engine", - "title": "hasura readme", - "text": "# Hasura GraphQL Engine\n\n[![Latest release](https://img.shields.io/github/v/release/hasura/graphql-engine)](https://github.com/hasura/graphql-engine/releases/latest)\n[![Docs](https://img.shields.io/badge/docs-v1.x-brightgreen.svg?style=flat)](https://hasura.io/docs)\n[![CircleCI](https://circleci.com/gh/hasura/graphql-engine.svg?style=shield)](https://circleci.com/gh/hasura/graphql-engine)\n\n\n\n\n\n\nHasura GraphQL Engine is a blazing-fast GraphQL server that gives you **instant, realtime GraphQL APIs over Postgres**, with [**webhook triggers**](event-triggers.md) on database events, and [**remote schemas**](remote-schemas.md) for business logic.\n\nHasura helps you build [GraphQL](https://hasura.io/graphql/) apps backed by Postgres or incrementally move to GraphQL for existing applications using Postgres.\n\nRead more at [hasura.io](https://hasura.io) and the [docs](https://hasura.io/docs/).\n\n------------------\n\n![Hasura GraphQL Engine Demo](assets/demo.gif)\n\n------------------\n\n![Hasura GraphQL Engine Realtime Demo](assets/realtime.gif)\n\n-------------------\n\n## Features\n\n* **Make powerful queries**: Built-in filtering, pagination, pattern search, bulk insert, update, delete mutations\n* **Realtime**: Convert any GraphQL query to a live query by using subscriptions\n* **Merge remote schemas**: Access custom GraphQL schemas for business logic via a single GraphQL Engine endpoint. 
[**Read more**](remote-schemas.md).\n* **Trigger webhooks or serverless functions**: On Postgres insert/update/delete events ([read more](event-triggers.md))\n* **Works with existing, live databases**: Point it to an existing Postgres database to instantly get a ready-to-use GraphQL API\n* **Fine-grained access control**: Dynamic access control that integrates with your auth system (eg: auth0, firebase-auth)\n* **High-performance & low-footprint**: ~15MB docker image; ~50MB RAM @ 1000 req/s; multi-core aware\n* **Admin UI & Migrations**: Admin UI & Rails-inspired schema migrations\n* **Postgres** \u2764\ufe0f: Supports Postgres types (PostGIS/geo-location, etc.), turns views to *graphs*, trigger stored functions or procedures with mutations\n\nRead more at [hasura.io](https://hasura.io) and the [docs](https://hasura.io/docs/).\n\n## Table of contents\n\n**Table of Contents**\n\n- [Quickstart:](#quickstart)\n - [One-click deployment on Hasura Cloud](#one-click-deployment-on-hasura-cloud)\n - [Other one-click deployment options](#other-one-click-deployment-options)\n - [Other deployment methods](#other-deployment-methods)\n- [Architecture](#architecture)\n- [Client-side tooling](#client-side-tooling)\n- [Add business logic](#add-business-logic)\n - [Remote schemas](#remote-schemas)\n - [Trigger webhooks on database events](#trigger-webhooks-on-database-events)\n- [Demos](#demos)\n - [Realtime applications](#realtime-applications)\n - [Videos](#videos)\n- [Support & Troubleshooting](#support--troubleshooting)\n- [Contributing](#contributing)\n- [Brand assets](#brand-assets)\n- [License](#license)\n- [Translations](#translations)\n\n\n\n## Quickstart:\n\n### One-click deployment on Hasura Cloud\n\nThe fastest and easiest way to try Hasura out is via [Hasura Cloud](https://hasura.io/docs/cloud/1.0/manual/getting-started/index.html).\n\n1. Click on the following button to deploy GraphQL engine on Hasura Cloud including Postgres add-on or using an existing Postgres database:\n\n [![Deploy to Hasura Cloud](https://graphql-engine-cdn.hasura.io/img/deploy_to_hasura.png)](https://cloud.hasura.io/)\n\n2. Open the Hasura console\n\n Click on the button \"Launch console\" to open the Hasura console.\n\n3. Make your first GraphQL query\n\n Create a table and instantly run your first query. 
Follow this [simple guide](https://hasura.io/docs/latest/graphql/core/getting-started/first-graphql-query.html).\n\n### Other one-click deployment options\n\nCheck out the instructions for the following one-click deployment options:\n\n| **Infra provider** | **One-click link** | **Additional information** |\n|:------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------:|\n| Heroku | [![Deploy to Heroku](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/hasura/graphql-engine-heroku) | [docs](https://hasura.io/docs/latest/graphql/core/guides/deployment/heroku-one-click.html) |\n| DigitalOcean | [![Deploy to DigitalOcean](https://graphql-engine-cdn.hasura.io/img/create_hasura_droplet_200px.png)](https://marketplace.digitalocean.com/apps/hasura?action=deploy&refcode=c4d9092d2c48&utm_source=hasura&utm_campaign=readme) | [docs](https://hasura.io/docs/latest/graphql/core/guides/deployment/digital-ocean-one-click.html#hasura-graphql-engine-digitalocean-one-click-app) |\n| Azure | [![Deploy to Azure](http://azuredeploy.net/deploybutton.png)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3a%2f%2fraw.githubusercontent.com%2fhasura%2fgraphql-engine%2fmaster%2finstall-manifests%2fazure-container-with-pg%2fazuredeploy.json) | [docs](https://hasura.io/docs/latest/graphql/core/guides/deployment/azure-container-instances-postgres.html) |\n| Render | [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/render-examples/hasura-graphql) | [docs](https://hasura.io/docs/latest/graphql/core/guides/deployment/render-one-click.html) |\n\n### Other deployment methods\n\nFor Docker-based deployment and advanced configuration options, see [deployment\nguides](https://hasura.io/docs/latest/graphql/core/getting-started/index.html) or\n[install manifests](install-manifests).\n\n## Architecture\n\nThe Hasura GraphQL Engine fronts a Postgres database instance and can accept GraphQL requests from your client apps. It can be configured to work with your existing auth system and can handle access control using field-level rules with dynamic variables from your auth system.\n\nYou can also merge remote GraphQL schemas and provide a unified GraphQL API.\n\n![Hasura GraphQL Engine architecture](assets/hasura-arch.svg)\n\n## Client-side tooling\n\nHasura works with any GraphQL client. We recommend using [Apollo Client](https://github.com/apollographql/apollo-client). See [awesome-graphql](https://github.com/chentsulin/awesome-graphql) for a list of clients.\n\n## Add business logic\n\nGraphQL Engine provides easy-to-reason, scalable and performant methods for adding custom business logic to your backend:\n\n### Remote schemas\n\nAdd custom resolvers in a remote schema in addition to Hasura's Postgres-based GraphQL schema. 
Ideal for use-cases like implementing a payment API, or querying data that is not in your database - [read more](remote-schemas.md).\n\n### Trigger webhooks on database events\n\nAdd asynchronous business logic that is triggered based on database events.\nIdeal for notifications, data-pipelines from Postgres or asynchronous\nprocessing - [read more](event-triggers.md).\n\n### Derived data or data transformations\n\nTransform data in Postgres or run business logic on it to derive another dataset that can be queried using GraphQL Engine - [read more](https://hasura.io/docs/latest/graphql/core/queries/derived-data.html).\n\n## Demos\n\nCheck out all the example applications in the [community/sample-apps](community/sample-apps) directory.\n\n### Realtime applications\n\n- Group Chat application built with React, includes a typing indicator, online users & new\n message notifications.\n - [Try it out](https://realtime-chat.demo.hasura.app/)\n - [Tutorial](community/sample-apps/realtime-chat)\n - [Browse APIs](https://realtime-chat.demo.hasura.app/console)\n\n- Live location tracking app that shows a running vehicle changing current GPS\n coordinates moving on a map.\n - [Try it out](https://realtime-location-tracking.demo.hasura.app/)\n - [Tutorial](community/sample-apps/realtime-location-tracking)\n - [Browse APIs](https://realtime-location-tracking.demo.hasura.app/console)\n\n- A realtime dashboard for data aggregations on continuously changing data.\n - [Try it out](https://realtime-poll.demo.hasura.app/)\n - [Tutorial](community/sample-apps/realtime-poll)\n - [Browse APIs](https://realtime-poll.demo.hasura.app/console)\n\n### Videos\n\n* [Add GraphQL to a self-hosted GitLab instance](https://www.youtube.com/watch?v=a2AhxKqd82Q) (*3:44 mins*)\n* [Todo app with Auth0 and GraphQL backend](https://www.youtube.com/watch?v=15ITBYnccgc) (*4:00 mins*)\n* [GraphQL on GitLab integrated with GitLab auth](https://www.youtube.com/watch?v=m1ChRhRLq7o) (*4:05 mins*)\n* [Dashboard for 10million rides with geo-location (PostGIS, Timescale)](https://www.youtube.com/watch?v=tsY573yyGWA) (*3:06 mins*)\n\n\n## Support & Troubleshooting\n\nThe documentation and community will help you troubleshoot most issues. If you have encountered a bug or need to get in touch with us, you can contact us using one of the following channels:\n\n* Support & feedback: [Discord](https://discord.gg/hasura)\n* Issue & bug tracking: [GitHub issues](https://github.com/hasura/graphql-engine/issues)\n* Follow product updates: [@HasuraHQ](https://twitter.com/hasurahq)\n* Talk to us on our [website chat](https://hasura.io)\n\nWe are committed to fostering an open and welcoming environment in the community. Please see the [Code of Conduct](code-of-conduct.md).\n\nIf you want to report a security issue, please [read this](SECURITY.md).\n\n## Contributing\n\nCheck out our [contributing guide](CONTRIBUTING.md) for more details.\n\n## Brand assets\n\nHasura brand assets (logos, the Hasura mascot, powered by badges etc.) can be\nfound in the [assets/brand](assets/brand) folder. Feel free to use them in your\napplication/website etc. We'd be thrilled if you add the \"Powered by Hasura\"\nbadge to your applications built using Hasura. \u2764\ufe0f\n\n
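As a companion to the Hasura quickstart above ("make your first GraphQL query"), here is a minimal sketch of calling a Hasura GraphQL endpoint from Go using only the standard library. The endpoint URL, the `users` table, and the admin-secret value are placeholders/assumptions for a locally running instance, not values taken from this README.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// A local Hasura instance usually serves GraphQL at /v1/graphql (assumed).
	endpoint := "http://localhost:8080/v1/graphql"

	// Placeholder query against a hypothetical "users" table.
	payload, _ := json.Marshal(map[string]string{
		"query": `query { users(limit: 5) { id name } }`,
	})

	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Only needed if an admin secret is configured (placeholder value).
	req.Header.Set("x-hasura-admin-secret", "myadminsecretkey")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%v\n", result)
}
```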
\n \n \n
\n\n```html\n\n\n \n\n\n\n\n \n\n```\n\n## License\n\nThe core GraphQL Engine is available under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) (Apache-2.0).\n\nAll **other contents** (except those in [`server`](server), [`cli`](cli) and\n[`console`](console) directories) are available under the [MIT License](LICENSE-community).\nThis includes everything in the [`docs`](docs) and [`community`](community)\ndirectories.\n\n## Translations\n\nThis readme is available in the following translations:\n\n- [Japanese :jp:](translations/README.japanese.md) (:pray: [@moksahero](https://github.com/moksahero))\n- [French :fr:](translations/README.french.md) (:pray: [@l0ck3](https://github.com/l0ck3))\n- [Bosnian :bosnia_herzegovina:](translations/README.bosnian.md) (:pray: [@hajro92](https://github.com/hajro92))\n- [Russian :ru:](translations/README.russian.md) (:pray: [@highflyer910](https://github.com/highflyer910))\n- [Greek \ud83c\uddec\ud83c\uddf7](translations/README.greek.md) (:pray: [@MIP2000](https://github.com/MIP2000))\n- [Spanish \ud83c\uddf2\ud83c\uddfd](/translations/README.mx_spanish.md)(:pray: [@ferdox2](https://github.com/ferdox2))\n- [Indonesian :indonesia:](translations/README.indonesian.md) (:pray: [@anwari666](https://github.com/anwari666))\n- [Brazilian Portuguese :brazil:](translations/README.portuguese_br.md) (:pray: [@rubensmp](https://github.com/rubensmp))\n- [German \ud83c\udde9\ud83c\uddea](translations/README.german.md) (:pray: [@FynnGrandke](https://github.com/FynnGrandke))\n- [Chinese :cn:](translations/README.chinese.md) (:pray: [@jagreetdg](https://github.com/jagreetdg) & [@johnbanq](https://github.com/johnbanq))\n- [Turkish :tr:](translations/README.turkish.md) (:pray: [@berat](https://github.com/berat))\n- [Korean :kr:](translations/README.korean.md) (:pray: [@\ub77c\uc2a4\ud06c](https://github.com/laskdjlaskdj12))\n\nTranslations for other files can be found [here](translations).\n", - "source_links": [], - "id": 53 - }, - { - "page_link": "https://github.com/segmentio/ory-hydra", - "title": "hydra readme", - "text": "# ![ORY Hydra](docs/images/logo.png)\n\n[![Join the chat at https://gitter.im/ory-am/hydra](https://img.shields.io/badge/join-chat-00cc99.svg)](https://gitter.im/ory-am/hydra?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n[![Join newsletter](https://img.shields.io/badge/join-newsletter-00cc99.svg)](http://eepurl.com/bKT3N9)\n[![Follow twitter](https://img.shields.io/badge/follow-twitter-00cc99.svg)](https://twitter.com/_aeneasr)\n[![Follow GitHub](https://img.shields.io/badge/follow-github-00cc99.svg)](https://github.com/arekkas)\n[![Become a patron!](https://img.shields.io/badge/support%20us-on%20patreon-green.svg)](https://patreon.com/user?u=4298803)\n\n[![Build Status](https://travis-ci.org/ory/hydra.svg?branch=master)](https://travis-ci.org/ory/hydra)\n[![Coverage Status](https://coveralls.io/repos/ory/hydra/badge.svg?branch=master&service=github)](https://coveralls.io/github/ory/hydra?branch=master)\n[![Code Climate](https://codeclimate.com/github/ory/hydra/badges/gpa.svg)](https://codeclimate.com/github/ory/hydra)\n[![Go Report Card](https://goreportcard.com/badge/github.com/ory/hydra)](https://goreportcard.com/report/github.com/ory/hydra)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/364/badge)](https://bestpractices.coreinfrastructure.org/projects/364)\n\n[![Docs 
Guide](https://img.shields.io/badge/docs-guide-blue.svg)](https://ory.gitbooks.io/hydra/content/)\n[![HTTP API Documentation](https://img.shields.io/badge/docs-http%20api-blue.svg)](http://docs.hydra13.apiary.io/)\n[![Code Documentation](https://img.shields.io/badge/docs-godoc-blue.svg)](https://godoc.org/github.com/ory/hydra)\n\nORY Hydra offers OAuth 2.0 and OpenID Connect Core 1.0 capabilities as a service and is built on top of the security-first\nOAuth2 and OpenID Connect SDK [ORY Fosite](https://github.com/ory/fosite) and the access control\nSDK [ORY Ladon](https://github.com/ory/ladon). ORY Hydra is different, because it works with\nany existing authentication infrastructure, not just LDAP or SAML. By implementing a consent app (works with any programming language)\nyou build a bridge between ORY Hydra and your authentication infrastructure.\n\nORY Hydra is able to securely manage JSON Web Keys, and has a sophisticated policy-based access control you can use if you want to.\n\nORY Hydra is suitable for green- (new) and brownfield (existing) projects. If you are not familiar with OAuth 2.0 and are working\non a greenfield project, we recommend evaluating if OAuth 2.0 really serves your purpose.\n**Knowledge of OAuth 2.0 is imperative in understanding what ORY Hydra does and how it works.**\n\nJoin the [ORY Hydra Newsletter](http://eepurl.com/bKT3N9) to stay on top of new developments. ORY Hydra has a lovely, active\ncommunity on [Gitter](https://gitter.im/ory-am/hydra). For advanced use cases, check out the\n[Enterprise Edition](#enterprise-edition) section.\n\n---\n\n\n\n**Table of Contents**\n\n- [What is ORY Hydra?](#what-is-ory-hydra)\n - [ORY Hydra implements open standards](#ory-hydra-implements-open-standards)\n- [Sponsors & Adopters](#sponsors-&-adopters)\n - [Sponsors](#sponsors)\n - [Adopters](#adopters)\n- [ORY Hydra for Enterprise](#ory-hydra-for-enterprise)\n- [Quickstart](#quickstart)\n - [5 minutes tutorial: Run your very own OAuth2 environment](#5-minutes-tutorial-run-your-very-own-oauth2-environment)\n - [Installation](#installation)\n - [Download binaries](#download-binaries)\n - [Using Docker](#using-docker)\n - [Building from source](#building-from-source)\n- [Security](#security)\n- [Telemetry](#telemetry)\n- [Documentation](#documentation)\n - [Guide](#guide)\n - [HTTP API documentation](#http-api-documentation)\n - [Command line documentation](#command-line-documentation)\n - [Develop](#develop)\n- [Reception](#reception)\n- [Libraries and third-party projects](#libraries-and-third-party-projects)\n- [Blog posts & articles](#blog-posts-&-articles)\n\n\n\n## What is ORY Hydra?\n\nORY Hydra is a server implementation of the OAuth 2.0 authorization framework and the OpenID Connect Core 1.0. Existing OAuth2\nimplementations usually ship as libraries or SDKs such as [node-oauth2-server](https://github.com/oauthjs/node-oauth2-server)\nor [fosite](https://github.com/ory/fosite/issues), or as fully featured identity solutions with user\nmanagement and user interfaces, such as [Dex](https://github.com/coreos/dex).\n\nImplementing and using OAuth2 without understanding the whole specification is challenging and prone to errors, even when\nSDKs are being used. The primary goal of ORY Hydra is to make OAuth 2.0 and OpenID Connect 1.0 better accessible.\n\nORY Hydra implements the flows described in OAuth2 and OpenID Connect 1.0 without forcing you to use a \"Hydra User Management\"\nor some template engine or a predefined front-end. 
Instead it relies on HTTP redirection and cryptographic methods\nto verify user consent allowing you to use ORY Hydra with any authentication endpoint, be it [authboss](https://github.com/go-authboss/authboss),\n[auth0.com](https://auth0.com/) or your proprietary PHP authentication.\n\n### ORY Hydra implements open standards\n\nORY Hydra implements Open Standards set by the IETF:\n\n* [The OAuth 2.0 Authorization Framework](https://tools.ietf.org/html/rfc6749)\n* [OAuth 2.0 Threat Model and Security Considerations](https://tools.ietf.org/html/rfc6819)\n* [OAuth 2.0 Token Revocation](https://tools.ietf.org/html/rfc7009)\n* [OAuth 2.0 Token Introspection](https://tools.ietf.org/html/rfc7662)\n* [OAuth 2.0 Dynamic Client Registration Protocol](https://tools.ietf.org/html/rfc7591)\n* [OAuth 2.0 Dynamic Client Registration Management Protocol](https://tools.ietf.org/html/rfc7592)\n* [OAuth 2.0 for Native Apps](https://tools.ietf.org/html/draft-ietf-oauth-native-apps-10)\n\nand the OpenID Foundation:\n\n* [OpenID Connect Core 1.0](http://openid.net/specs/openid-connect-core-1_0.html)\n* [OpenID Connect Discovery 1.0](https://openid.net/specs/openid-connect-discovery-1_0.html)\n\n## Sponsors & Adopters\n\nThis is a cureated list of Hydra sponsors and adopters. If you want to be on this list, [contact us](mailto:hi@ory.am).\n\n### Sponsors\n\n\"Auth0.com\"\n\nWe are proud to have [Auth0](https://auth0.com) as a **gold sponsor** for ORY Hydra. [Auth0](https://auth0.com) solves\nthe most complex identity use cases with an extensible and easy to integrate platform that secures billions of logins\nevery year. At ORY, we use [Auth0](https://auth0.com) in conjunction with ORY Hydra for various internal projects.\n\n
\n\n### Adopters\n\nORY Hydra is battle-tested in production systems. This is a curated list of ORY Hydra adopters.\n\n

Arduino is an open-source electronics platform based on easy-to-use hardware and software. It's intended\nfor anyone making interactive projects. ORY Hydra secures Arduino's developer platform.

\n\n
\n\n## ORY Hydra for Enterprise\n\nORY Hydra is available as an Apache 2.0-licensed Open Source technology. In enterprise environments, however,\nthere are numerous demands, such as\n\n* OAuth 2.0 and OpenID Connect consulting.\n* security auditing and certification.\n* auditable log trails.\n* guaranteed performance metrics, such as throughput per second.\n* management user interfaces.\n* ... and a wide range of narrow use cases specific to each business's demands.\n\nGain access to more features and our security experts with ORY Hydra for Enterprise! [Request details now!](https://docs.google.com/forms/d/e/1FAIpQLSf53GJwQxzIatTSEM7sXhpkWRh6kddKxzNfNAQ9GsLNEfuFRA/viewform)\n\n## Quickstart\n\nThis section is a quickstart guide to working with ORY Hydra. In-depth docs are available as well:\n\n* The documentation is available on [GitBook](https://ory.gitbooks.io/hydra/content/).\n* The REST API documentation is available at [Apiary](http://docs.hydra13.apiary.io/).\n\n### 5 minutes tutorial: Run your very own OAuth2 environment\n\nThe **[tutorial](https://ory.gitbooks.io/hydra/content/tutorial.html)** teaches you to set up ORY Hydra,\na Postgres instance and an exemplary identity provider written in React using docker compose.\nIt will take you about 5 minutes to complete the **[tutorial](https://ory.gitbooks.io/hydra/content/tutorial.html)**.\n\n
\n\n### Installation\n\nThere are various ways of installing ORY Hydra on your system.\n\n#### Download binaries\n\nThe client and server **binaries are downloadable at [releases](https://github.com/ory/hydra/releases)**.\nThere is currently no installer available. You have to add the ORY Hydra binary to the PATH environment variable yourself or put\nthe binary in a location that is already in your path (`/usr/bin`, ...). \nIf you do not understand what that all of this means, ask in our [chat channel](https://gitter.im/ory-am/hydra). We are happy to help.\n\n#### Using Docker\n\n**Starting the host** is easiest with docker. The host process handles HTTP requests and is backed by a database.\nRead how to install docker on [Linux](https://docs.docker.com/linux/), [OSX](https://docs.docker.com/mac/) or\n[Windows](https://docs.docker.com/windows/). ORY Hydra is available on [Docker Hub](https://hub.docker.com/r/oryd/hydra/).\n\nYou can use ORY Hydra without a database, but be aware that restarting, scaling\nor stopping the container will **lose all data**:\n\n```\n$ docker run -e \"DATABASE_URL=memory\" -d --name my-hydra -p 4444:4444 oryd/hydra\nec91228cb105db315553499c81918258f52cee9636ea2a4821bdb8226872f54b\n```\n\n*Note: We had to create a new docker hub repository. Tags prior to 0.7.5 are available [here](https://hub.docker.com/r/ory-am/hydra/).*\n\n**Using the client command line interface:** You can ssh into the ORY Hydra container\nand execute the ORY Hydra command from there:\n\n```\n$ docker exec -i -t /bin/bash\n# e.g. docker exec -i -t ec91228 /bin/bash\n\nroot@ec91228cb105:/go/src/github.com/ory/hydra# hydra\nHydra is a twelve factor OAuth2 and OpenID Connect provider\n\n[...]\n```\n\n#### Building from source\n\nIf you wish to compile ORY Hydra yourself, you need to install and set up [Go 1.8+](https://golang.org/) and add `$GOPATH/bin`\nto your `$PATH`. To do so, run the following commands in a shell (bash, sh, cmd.exe, ...):\n\n```\ngo get -d -u github.com/ory/hydra\ngo get github.com/Masterminds/glide\ncd $GOPATH/src/github.com/ory/hydra\nglide install\ngo install github.com/ory/hydra\nhydra\n```\n\n**Notes**\n\n* We changed organization name from `ory-am` to `ory`. In order to keep backwards compatibility, we did not rename Go packages.\n* You can ignore warnings similar to `package github.com/ory/hydra/cmd/server: case-insensitive import collision: \"github.com/Sirupsen/logrus\" and \"github.com/sirupsen/logrus\"`.\n\n## Security\n\n*Why should I use ORY Hydra? It's not that hard to implement two OAuth2 endpoints and there are numerous SDKs out there!*\n\nOAuth2 and OAuth2 related specifications are over 400 written pages. Implementing OAuth2 is easy, getting it right is hard.\nORY Hydra is trusted by companies all around the world, has a vibrant community and faces millions of requests in production\neach day. Of course, we also compiled a security guide with more details on cryptography and security concepts.\nRead [the security guide now](https://ory.gitbooks.io/hydra/content/security.html).\n\n## Telemetry\n\nORY Hydra collects summarized, anonymized telemetry which can optionally be turned off. 
Click [here](https://ory.gitbooks.io/hydra/content/telemetry.html)\nto learn more.\n\n## Documentation\n\n### Guide\n\nThe Guide is available on [GitBook](https://ory.gitbooks.io/hydra/content/).\n\n### HTTP API documentation\n\nThe HTTP API is documented at [Apiary](http://docs.hydra13.apiary.io/).\n\n### Command line documentation\n\nRun `hydra -h` or `hydra help`.\n\n### Develop\n\nDeveloping with ORY Hydra is as easy as:\n\n```\ngo get -d -u github.com/ory/hydra\ngo get github.com/Masterminds/glide\ncd $GOPATH/src/github.com/ory/hydra\nglide install\ngo test $(glide novendor)\n```\n\nThen run it with in-memory database:\n\n```\nDATABASE_URL=memory go run main.go host\n```\n\n**Notes**\n\n* We changed organization name from `ory-am` to `ory`. In order to keep backwards compatibility, we did not rename Go packages.\n* You can ignore warnings similar to `package github.com/ory/hydra/cmd/server: case-insensitive import collision: \"github.com/Sirupsen/logrus\" and \"github.com/sirupsen/logrus\"`.\n\n## Reception\n\nHydra has received a lot of positive feedback. Let's see what the community is saying:\n\n> Nice! Lowering barriers to the use of technologies like these is important.\n\n[Pyxl101](https://news.ycombinator.com/item?id=11798641)\n\n> OAuth is a framework not a protocol. The security it provides can vary greatly between implementations.\nFosite (which is what this is based on) is a very good implementation from a security perspective: https://github.com/ory/fosite#a-word-on-security\n\n[abritishguy](https://news.ycombinator.com/item?id=11800515)\n\n> [...] Thanks for releasing this by the way, looks really well engineered. [...]\n\n## Libraries and third-party projects\n\nOfficial:\n* [Consent App SDK For NodeJS](https://github.com/ory/hydra-js)\n* [Consent App Example written in Go](https://github.com/ory/hydra-consent-app-go)\n* [Exemplary Consent App with Express and NodeJS](https://github.com/ory/hydra-consent-app-express)\n\nCommunity:\n* [Consent App SDK for Go](https://github.com/janekolszak/idp)\n* [ORY Hydra middleware for Gin](https://github.com/janekolszak/gin-hydra)\n* [Kubernetes helm chart](https://github.com/kubernetes/charts/pull/1022)\n\n## Blog posts & articles\n\n* [Creating an oauth2 custom lamda authorizer for use with Amazons (AWS) API Gateway using Hydra](https://blogs.edwardwilde.com/2017/01/12/creating-an-oauth2-custom-lamda-authorizer-for-use-with-amazons-aws-api-gateway-using-hydra/)\n* Warning, ORY Hydra has changed almost everything since writing this\narticle: [Hydra: Run your own Identity and Access Management service in <5 Minutes](https://blog.gopheracademy.com/advent-2015/hydra-auth/)\n", - "source_links": [], - "id": 54 - }, - { - "page_link": "https://github.com/imgproxy/imgproxy", - "title": "imgproxy readme", - "text": "
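Building on the ORY Hydra Docker quickstart above: once a client is registered, a confidential application can obtain an access token with the OAuth2 client-credentials grant. The sketch below uses `golang.org/x/oauth2`; the client ID/secret, the requested scope, and the token URL on port 4444 are assumptions about a locally running Hydra, not values given in this README.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// Placeholder credentials: register a client with Hydra first
	// (e.g. via the hydra CLI) and substitute its ID and secret here.
	conf := clientcredentials.Config{
		ClientID:     "my-client",
		ClientSecret: "my-secret",
		// Assumes Hydra's public endpoint is exposed on localhost:4444.
		TokenURL: "http://localhost:4444/oauth2/token",
		Scopes:   []string{"offline"},
	}

	token, err := conf.Token(context.Background())
	if err != nil {
		log.Fatalf("requesting token: %v", err)
	}
	fmt.Println("access token:", token.AccessToken)
}
```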


\n\n

\n Website |\n Blog |\n Documentation |\n imgproxy Pro |\n Docker |\n Twitter |\n Discord\n

\n\n

\n\"GH\n\"GH\n\"Docker\n

\n\n---\n\n[imgproxy](https://imgproxy.net) is a fast and secure standalone server for resizing and converting remote images. The guiding principles behind imgproxy are security, speed, and simplicity.\n\nimgproxy is able to quickly and easily resize images on the fly, and it's well-equipped to handle a large amount of image resizing. imgproxy is a fast, secure replacement for all the image resizing code inside your web application (such as resizing libraries, or code that calls ImageMagick or GraphicsMagic). It's also an indispensable tool for processing images from a remote source. With imgproxy, you don\u2019t need to repeatedly prepare images to fit your design every time it changes.\n\nTo get an even better introduction, and to dive deeper into the nitty gritty details, check out this article: [imgproxy: Resize your images instantly and securely](https://evilmartians.com/chronicles/introducing-imgproxy)\n\n\n \n \n \n \n\n\n#### Simplicity\n\n> \"No code is better than no code.\"\n\nimgproxy only includes the must-have features for image processing, fine-tuning and security. Specifically,\n\n* It would be great to be able to rotate, flip and apply masks to images, but in most of the cases, it is possible \u2014 and is much easier \u2014 to do that using CSS3.\n* It may be great to have built-in HTTP caching of some kind, but it is way better to use a Content-Delivery Network or a caching proxy server for this, as you will have to do this sooner or later in the production environment.\n* It might be useful to have everything built in \u2014 such as HTTPS support \u2014 but an easy way to solve that would be just to use a proxying HTTP server such as nginx.\n\n#### Speed\n\nimgproxy takes advantage of probably the most efficient image processing library out there \u2013 `libvips`. It\u2019s scary fast and comes with a very low memory footprint. Thanks to libvips, we can readily and extemporaneously process a massive amount of images.\n\nimgproxy uses Go\u2019s raw (no wrappers) native `net/http` package to omit any overhead while processing requests and provides the best possible HTTP support.\n\nYou can take a look at some benchmarking results and compare imgproxy with some well-known alternatives in our [benchmark report](https://github.com/imgproxy/imgproxy/blob/master/BENCHMARK.md).\n\n#### Security\n\nIn terms of security, the massive processing of remote images is a potentially dangerous endeavor. There are a number of possible attack vectors, so it\u2019s a good idea to take an approach that considers attack prevention measures as a priority. Here\u2019s how imgproxy does this:\n\n* imgproxy checks the image type and its \u201creal\u201d dimensions when downloading. The image will not be fully downloaded if it has an unknown format or if the dimensions are too big (you can set the max allowed dimensions). This is how imgproxy protects from so called \"image bombs\u201d, like those described in [this doc](https://www.bamsoftware.com/hacks/deflate.html).\n\n* imgproxy protects image URLs with a signature, so an attacker cannot enact a denial-of-service attack by requesting multiple image resizes.\n\n* imgproxy supports authorization by HTTP header. 
This prevents imgproxy from being used directly by an attacker, but allows it to be used via a CDN or a caching server \u2014 simply by adding a header to a proxy or CDN config.\n\n## Usage\n\nCheck out our \ud83d\udcd1 [Documentation](https://docs.imgproxy.net).\n\n## Author\n\nSergey \"[DarthSim](https://github.com/DarthSim)\" Alexandrovich\n\n## Special thanks\n\nMany thanks to:\n\n* [Roman Shamin](https://github.com/romashamin) for the awesome logo.\n* [Alena Kirdina](https://github.com/egodyston) and [Alexander Madyankin](https://github.com/madyankin) for the great website.\n* [John Cupitt](https://github.com/jcupitt) for developing [libvips](https://github.com/libvips/libvips) and for helping me optimize its usage with imgproxy.\n* [Kirill Kuznetsov](https://github.com/dragonsmith) for the [Helm chart](https://github.com/imgproxy/imgproxy-helm).\n* [Travis Turner](https://github.com/Travis-Turner) for keeping the documentation in good shape.\n\n## License\n\nimgproxy is licensed under the MIT license.\n\nSee [LICENSE](https://github.com/imgproxy/imgproxy/blob/master/LICENSE) for the full license text.\n\n## Security Contact\n\nTo report a security vulnerability, please contact us at security@imgproxy.net. We will coordinate the fix and disclosure.\n", - "source_links": [], - "id": 55 - }, - { - "page_link": "https://github.com/influxdata/influxdb", - "title": "influx readme", - "text": "# InfluxDB\n
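To illustrate the signed-URL protection described in the imgproxy section above, here is a sketch of generating a signed processing URL in Go. The hex-encoded key and salt, the processing options, and the source image URL are all placeholders; the scheme (URL-safe base64 of an HMAC-SHA256 over salt followed by the path) follows imgproxy's documented approach, but check the current docs before relying on it.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// Placeholder hex-encoded key and salt (IMGPROXY_KEY / IMGPROXY_SALT).
	key, _ := hex.DecodeString("943b421c9eb07c830af81030552c86009268de4e")
	salt, _ := hex.DecodeString("520f986b998545b4785e0defbc4f3c1203f22de2")

	// The unsigned part: processing options plus the source URL in "plain" form.
	path := "/rs:fit:300:300/plain/http://example.com/images/cat.jpg"

	// signature = URL-safe base64 (no padding) of HMAC-SHA256(key, salt || path).
	mac := hmac.New(sha256.New, key)
	mac.Write(salt)
	mac.Write([]byte(path))
	signature := base64.RawURLEncoding.EncodeToString(mac.Sum(nil))

	// Assumes imgproxy is listening on localhost:8080.
	fmt.Printf("http://localhost:8080/%s%s\n", signature, path)
}
```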
\n \"InfluxDB\n
\n\n

\n \n \"CircleCI\"\n \n \n \n \"Slack\n \n \n \n \"Docker\n \n \n \n \"Docker\n \n

\n

\n Website\n \u2022\n Documentation\n \u2022\n InfluxDB University\n \u2022\n Blog\n

\n\n---\n\nInfluxDB is an open source time series platform. This includes APIs for storing and querying data, processing it in the background for ETL or monitoring and alerting purposes, user dashboards, and visualizing and exploring the data and more. The master branch on this repo now represents the latest InfluxDB, which now includes functionality for Kapacitor (background processing) and Chronograf (the UI) all in a single binary.\n\nThe list of InfluxDB Client Libraries that are compatible with the latest version can be found in [our documentation](https://docs.influxdata.com/influxdb/latest/tools/client-libraries/).\n\nIf you are looking for the 1.x line of releases, there are branches for each minor version as well as a `master-1.x` branch that will contain the code for the next 1.x release. The master-1.x [working branch is here](https://github.com/influxdata/influxdb/tree/master-1.x). The [InfluxDB 1.x Go Client can be found here](https://github.com/influxdata/influxdb1-client).\n\n| Try **InfluxDB Cloud** for free and get started fast with no local setup required. Click [**here**](https://cloud2.influxdata.com/signup) to start building your application on InfluxDB Cloud. |\n|:------|\n\n## Install\n\nWe have nightly and versioned Docker images, Debian packages, RPM packages, and tarballs of InfluxDB available at the [InfluxData downloads page](https://portal.influxdata.com/downloads/). We also provide the `influx` command line interface (CLI) client as a separate binary available at the same location.\n\nIf you are interested in building from source, see the [building from source](CONTRIBUTING.md#building-from-source) guide for contributors.\n\n\n \n\n\n## Get Started\n\nFor a complete getting started guide, please see our full [online documentation site](https://docs.influxdata.com/influxdb/latest/).\n\nTo write and query data or use the API in any way, you'll need to first create a user, credentials, organization and bucket.\nEverything in InfluxDB is organized under a concept of an organization. The API is designed to be multi-tenant.\nBuckets represent where you store time series data.\nThey're synonymous with what was previously in InfluxDB 1.x a database and retention policy.\n\nThe simplest way to get set up is to point your browser to [http://localhost:8086](http://localhost:8086) and go through the prompts.\n\nYou can also get set up from the CLI using the command `influx setup`:\n\n\n```bash\n$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup\nWelcome to InfluxDB 2.0!\nPlease type your primary username: marty\n\nPlease type your password:\n\nPlease type your password again:\n\nPlease type your primary organization name.: InfluxData\n\nPlease type your primary bucket name.: telegraf\n\nPlease type your retention period in hours.\nOr press ENTER for infinite.: 72\n\n\nYou have entered:\n Username: marty\n Organization: InfluxData\n Bucket: telegraf\n Retention Period: 72 hrs\nConfirm? 
(y/n): y\n\nUserID Username Organization Bucket\n033a3f2c5ccaa000 marty InfluxData Telegraf\nYour token has been stored in /Users/marty/.influxdbv2/credentials\n```\n\nYou can run this command non-interactively using the `-f, --force` flag if you are automating the setup.\nSome added flags can help:\n```bash\n$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup \\\n--username marty \\\n--password F1uxKapacit0r85 \\\n--org InfluxData \\\n--bucket telegraf \\\n--retention 168 \\\n--token where-were-going-we-dont-need-roads \\\n--force\n```\n\nOnce setup is complete, a configuration profile is created to allow you to interact with your local InfluxDB without passing in credentials each time. You can list and manage those profiles using the `influx config` command.\n```bash\n$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx config\nActive\tName\tURL\t\t\t Org\n*\t default\thttp://localhost:8086\tInfluxData\n```\n\n## Write Data\nWrite to measurement `m`, with tag `v=2`, in bucket `telegraf`, which belongs to organization `InfluxData`:\n\n```bash\n$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx write --bucket telegraf --precision s \"m v=2 $(date +%s)\"\n```\n\nSince you have a default profile set up, you can omit the Organization and Token from the command.\n\nWrite the same point using `curl`:\n\n```bash\ncurl --header \"Authorization: Token $(bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx auth list --json | jq -r '.[0].token')\" \\\n--data-raw \"m v=2 $(date +%s)\" \\\n\"http://localhost:8086/api/v2/write?org=InfluxData&bucket=telegraf&precision=s\"\n```\n\nRead that back with a simple Flux query:\n\n```bash\n$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx query 'from(bucket:\"telegraf\") |> range(start:-1h)'\nResult: _result\nTable: keys: [_start, _stop, _field, _measurement]\n _start:time _stop:time _field:string _measurement:string _time:time _value:float\n------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------\n2019-12-30T22:19:39.043918000Z 2019-12-30T23:19:39.043918000Z v m 2019-12-30T23:17:02.000000000Z 2\n```\n\nUse the `-r, --raw` option to return the raw flux response from the query. This is useful for moving data from one instance to another as the `influx write` command can accept the Flux response using the `--format csv` option.\n\n## Script with Flux\n\nFlux (previously named IFQL) is an open source functional data scripting language designed for querying, analyzing, and acting on data. Flux supports multiple data source types, including:\n\n- Time series databases (such as InfluxDB)\n- Relational SQL databases (such as MySQL and PostgreSQL)\n- CSV\n\nThe source for Flux is [available on GitHub](https://github.com/influxdata/flux).\nTo learn more about Flux, see the latest [InfluxData Flux documentation](https://docs.influxdata.com/flux/) and [CTO Paul Dix's presentation](https://speakerdeck.com/pauldix/flux-number-fluxlang-a-new-time-series-data-scripting-language).\n\n## Contribute to the Project\n\nInfluxDB is an [MIT licensed](LICENSE) open source project and we love our community. The fastest way to get something fixed is to open a PR. Check out our [contributing](CONTRIBUTING.md) guide if you're interested in helping out. 
Also, join us on our [Community Slack Workspace](https://influxdata.com/slack) if you have questions or comments for our engineering teams.\n\n## CI and Static Analysis\n\n### CI\n\nAll pull requests will run through CI, which is currently hosted by Circle.\nCommunity contributors should be able to see the outcome of this process by looking at the checks on their PR.\nPlease fix any issues to ensure a prompt review from members of the team.\n\nThe InfluxDB project is used internally in a number of proprietary InfluxData products, and as such, PRs and changes need to be tested internally.\nThis can take some time, and is not really visible to community contributors.\n\n### Static Analysis\n\nThis project uses the following static analysis tools.\nFailure during the running of any of these tools results in a failed build.\nGenerally, code must be adjusted to satisfy these tools, though there are exceptions.\n\n- [go vet](https://golang.org/cmd/vet/) checks for Go code that should be considered incorrect.\n- [go fmt](https://golang.org/cmd/gofmt/) checks that Go code is correctly formatted.\n- [go mod tidy](https://tip.golang.org/cmd/go/#hdr-Add_missing_and_remove_unused_modules) ensures that the source code and go.mod agree.\n- [staticcheck](https://staticcheck.io/docs/) checks for things like: unused code, code that can be simplified, code that is incorrect and code that will have performance issues.\n\n### staticcheck\n\nIf your PR fails `staticcheck` it is easy to dig into why it failed, and also to fix the problem.\nFirst, take a look at the error message in Circle under the `staticcheck` build section, e.g.,\n\n```\ntsdb/tsm1/encoding.gen.go:1445:24: func BooleanValues.assertOrdered is unused (U1000)\ntsdb/tsm1/encoding.go:172:7: receiver name should not be an underscore, omit the name if it is unused (ST1006)\n```\n\nNext, go and take a [look here](http://next.staticcheck.io/docs/checks) for some clarification on the error code that you have received, e.g., `U1000`.\nThe docs will tell you what's wrong, and often what you need to do to fix the issue.\n\n#### Generated Code\n\nSometimes generated code will contain unused code or occasionally that will fail a different check.\n`staticcheck` allows for [entire files](http://next.staticcheck.io/docs/#ignoring-problems) to be ignored, though it's not ideal.\nA linter directive, in the form of a comment, must be placed within the generated file.\nThis is problematic because it will be erased if the file is re-generated.\nUntil a better solution comes about, below is the list of generated files that need an ignores comment.\nIf you re-generate a file and find that `staticcheck` has failed, please see this list below for what you need to put back:\n\n| File | Comment |\n| :--------------------: | :--------------------------------------------------------------: |\n| query/promql/promql.go | //lint:file-ignore SA6001 Ignore all unused code, it's generated |\n\n#### End-to-End Tests\n\nCI also runs end-to-end tests. These test the integration between the `influxd` server the UI.\nSince the UI is used by interal repositories as well as the `influxdb` repository, the\nend-to-end tests cannot be run on forked pull requests or run locally. 
The extent of end-to-end\ntesting required for forked pull requests will be determined as part of the review process.\n\n## Additional Resources\n- [InfluxDB Tips and Tutorials](https://www.influxdata.com/blog/category/tech/influxdb/)\n- [InfluxDB Essentials Course](https://university.influxdata.com/courses/influxdb-essentials-tutorial/)\n- [Exploring InfluxDB Cloud Course](https://university.influxdata.com/courses/exploring-influxdb-cloud-tutorial/)", - "source_links": [], - "id": 56 - }, - { - "page_link": "https://github.com/kubernetes/ingress-nginx", - "title": "ingress-nginx readme", - "text": "# Ingress NGINX Controller\n\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/5691/badge)](https://bestpractices.coreinfrastructure.org/projects/5691)\n[![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/ingress-nginx)](https://goreportcard.com/report/github.com/kubernetes/ingress-nginx)\n[![GitHub license](https://img.shields.io/github/license/kubernetes/ingress-nginx.svg)](https://github.com/kubernetes/ingress-nginx/blob/main/LICENSE)\n[![GitHub stars](https://img.shields.io/github/stars/kubernetes/ingress-nginx.svg)](https://github.com/kubernetes/ingress-nginx/stargazers)\n[![GitHub stars](https://img.shields.io/badge/contributions-welcome-orange.svg)](https://github.com/kubernetes/ingress-nginx/blob/main/CONTRIBUTING.md)\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fkubernetes%2Fingress-nginx.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fkubernetes%2Fingress-nginx?ref=badge_shield)\n\nPlease fill out our 2022 Ingress-Nginx User Survey and let us know what you want to see in future releases.\n\nhttps://www.surveymonkey.com/r/ingressngx2022\n\n## Overview\n\ningress-nginx is an Ingress controller for Kubernetes using [NGINX](https://www.nginx.org/) as a reverse proxy and load balancer.\n\n[Learn more about Ingress on the main Kubernetes documentation site](https://kubernetes.io/docs/concepts/services-networking/ingress/).\n\n## Get started\n\nSee the [Getting Started](https://kubernetes.github.io/ingress-nginx/deploy/) document.\n\n## Troubleshooting\n\nIf you encounter issues, review the [troubleshooting docs](docs/troubleshooting.md), [file an issue](https://github.com/kubernetes/ingress-nginx/issues), or talk to us on the [#ingress-nginx channel](https://kubernetes.slack.com/messages/ingress-nginx) on the Kubernetes Slack server.\n\n## Changelog\n\nSee [the list of releases](https://github.com/kubernetes/ingress-nginx/releases) to find out about feature changes.\nFor detailed changes for each release; please check the [Changelog.md](Changelog.md) file.\nFor detailed changes on the `ingress-nginx` helm chart, please check the following [CHANGELOG.md](charts/ingress-nginx/CHANGELOG.md) file.\n\n### Support Versions table \n\n| Ingress-NGINX version | k8s supported version | Alpine Version | Nginx Version |\n|-----------------------|------------------------------|----------------|---------------|\n| v1.3.1 | 1.24, 1.23, 1.22, 1.21, 1.20 | 3.16.2 | 1.19.10\u2020 |\n| v1.3.0 | 1.24, 1.23, 1.22, 1.21, 1.20 | 3.16.0 | 1.19.10\u2020 |\n| v1.2.1 | 1.23, 1.22, 1.21, 1.20, 1.19 | 3.14.6 | 1.19.10\u2020 |\n| v1.1.3 | 1.23, 1.22, 1.21, 1.20, 1.19 | 3.14.4 | 1.19.10\u2020 |\n| v1.1.2 | 1.23, 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.1.1 | 1.23, 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.1.0 | 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.0.5 | 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 
1.19.9\u2020 |\n| v1.0.4 | 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.0.3 | 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.0.2 | 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.0.1 | 1.22, 1.21, 1.20, 1.19 | 3.14.2 | 1.19.9\u2020 |\n| v1.0.0 | 1.22, 1.21, 1.20, 1.19 | 3.13.5 | 1.20.1 |\n\n\n\u2020 _This build is [patched against CVE-2021-23017](https://github.com/openresty/openresty/commit/4b5ec7edd78616f544abc194308e0cf4b788725b#diff-42ef841dc27fe0b5aa2d06bd31308bb63a59cdcddcbcddd917248349d22020a3)._\n\nSee [this article](https://kubernetes.io/blog/2021/07/26/update-with-ingress-nginx/) if you want upgrade to the stable Ingress API. \n\n## Get Involved\n\nThanks for taking the time to join our community and start contributing!\n\n- This project adheres to the [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md). By participating in this project, you agree to abide by its terms.\n\n- **Contributing**: Contributions of all kind are welcome!\n \n - Read [`CONTRIBUTING.md`](CONTRIBUTING.md) for information about setting up your environment, the workflow that we expect, and instructions on the developer certificate of origin that we require.\n\n - Join our Kubernetes Slack channel for developer discussion : [#ingress-nginx-dev](https://kubernetes.slack.com/archives/C021E147ZA4).\n \n - Submit github issues for any feature enhancements, bugs or documentation problems. Please make sure to read the [Issue Reporting Checklist](https://github.com/kubernetes/ingress-nginx/blob/main/CONTRIBUTING.md#issue-reporting-guidelines) before opening an issue. Issues not conforming to the guidelines **may be closed immediately**.\n\n- **Support**: Join the [#ingress-nginx-users](https://kubernetes.slack.com/messages/CANQGM8BA/) channel inside the [Kubernetes Slack](http://slack.kubernetes.io/) to ask questions or get support from the maintainers and other users.\n \n - The [GitHub issues](https://github.com/kubernetes/ingress-nginx/issues) in the repository are **exclusively** for bug reports and feature requests.\n\n- **Discuss**: Tweet using the `#IngressNginx` hashtag.\n\n## License\n\n[Apache License 2.0](https://github.com/kubernetes/ingress-nginx/blob/main/LICENSE)\n", - "source_links": [], - "id": 57 - }, - { - "page_link": "https://github.com/istio/istio", - "title": "istio readme", - "text": "# Istio\n\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1395/badge)](https://bestpractices.coreinfrastructure.org/projects/1395)\n[![Go Report Card](https://goreportcard.com/badge/github.com/istio/istio)](https://goreportcard.com/report/github.com/istio/istio)\n[![GoDoc](https://godoc.org/istio.io/istio?status.svg)](https://godoc.org/istio.io/istio)\n\n\n \"Istio\n\n\n---\n\nAn open platform to connect, manage, and secure microservices.\n\n- For in-depth information about how to use Istio, visit [istio.io](https://istio.io)\n- To ask questions and get assistance from our community, visit [discuss.istio.io](https://discuss.istio.io)\n- To learn how to participate in our overall community, visit [our community page](https://istio.io/about/community)\n\nIn this README:\n\n- [Introduction](#introduction)\n- [Repositories](#repositories)\n- [Issue management](#issue-management)\n\nIn addition, here are some other documents you may wish to read:\n\n- [Istio Community](https://github.com/istio/community#istio-community) - describes how to get involved and contribute to the Istio project\n- [Istio Developer's 
Guide](https://github.com/istio/istio/wiki/Preparing-for-Development) - explains how to set up and use an Istio development environment\n- [Project Conventions](https://github.com/istio/istio/wiki/Development-Conventions) - describes the conventions we use within the code base\n- [Creating Fast and Lean Code](https://github.com/istio/istio/wiki/Writing-Fast-and-Lean-Code) - performance-oriented advice and guidelines for the code base\n\nYou'll find many other useful documents on our [Wiki](https://github.com/istio/istio/wiki).\n\n## Introduction\n\n[Istio](https://istio.io/latest/docs/concepts/what-is-istio/) is an open platform for providing a uniform way to [integrate\nmicroservices](https://istio.io/latest/docs/examples/microservices-istio/), manage [traffic flow](https://istio.io/latest/docs/concepts/traffic-management/) across microservices, enforce policies\nand aggregate telemetry data. Istio's control plane provides an abstraction\nlayer over the underlying cluster management platform, such as Kubernetes.\n\nIstio is composed of these components:\n\n- **Envoy** - Sidecar proxies per microservice to handle ingress/egress traffic\n between services in the cluster and from a service to external\n services. The proxies form a _secure microservice mesh_ providing a rich\n set of functions like discovery, rich layer-7 routing, circuit breakers,\n policy enforcement and telemetry recording/reporting\n functions.\n\n > Note: The service mesh is not an overlay network. It\n > simplifies and enhances how microservices in an application talk to each\n > other over the network provided by the underlying platform.\n\n- **Istiod** - The Istio control plane. It provides service discovery, configuration and certificate management. It consists of the following sub-components:\n\n - **Pilot** - Responsible for configuring the proxies at runtime.\n\n - **Citadel** - Responsible for certificate issuance and rotation.\n\n - **Galley** - Responsible for validating, ingesting, aggregating, transforming and distributing config within Istio.\n\n- **Operator** - The component provides user friendly options to operate the Istio service mesh.\n\n## Repositories\n\nThe Istio project is divided across a few GitHub repositories:\n\n- [istio/api](https://github.com/istio/api). This repository defines\ncomponent-level APIs and common configuration formats for the Istio platform.\n\n- [istio/community](https://github.com/istio/community). This repository contains\ninformation on the Istio community, including the various documents that govern\nthe Istio open source project.\n\n- [istio/istio](README.md). This is the main code repository. It hosts Istio's\ncore components, install artifacts, and sample programs. It includes:\n\n - [istioctl](istioctl/). This directory contains code for the\n[_istioctl_](https://istio.io/latest/docs/reference/commands/istioctl/) command line utility.\n\n - [operator](operator/). This directory contains code for the\n[Istio Operator](https://istio.io/latest/docs/setup/install/operator/).\n\n - [pilot](pilot/). This directory\ncontains platform-specific code to populate the\n[abstract service model](https://istio.io/docs/concepts/traffic-management/#pilot), dynamically reconfigure the proxies\nwhen the application topology changes, as well as translate\n[routing rules](https://istio.io/latest/docs/reference/config/networking/) into proxy specific configuration.\n\n - [security](security/). 
This directory contains [security](https://istio.io/latest/docs/concepts/security/) related code,\nincluding Citadel (acting as Certificate Authority), citadel agent, etc.\n\n- [istio/proxy](https://github.com/istio/proxy). The Istio proxy contains\nextensions to the [Envoy proxy](https://github.com/envoyproxy/envoy) (in the form of\nEnvoy filters) that support authentication, authorization, and telemetry collection.\n\n## Issue management\n\nWe use GitHub to track all of our bugs and feature requests. Each issue we track has a variety of metadata:\n\n- **Epic**. An epic represents a feature area for Istio as a whole. Epics are fairly broad in scope and are basically product-level things.\nEach issue is ultimately part of an epic.\n\n- **Milestone**. Each issue is assigned a milestone. This is 0.1, 0.2, ..., or 'Nebulous Future'. The milestone indicates when we\nthink the issue should get addressed.\n\n- **Priority**. Each issue has a priority which is represented by the column in the [Prioritization](https://github.com/orgs/istio/projects/6) project. Priority can be one of\nP0, P1, P2, or >P2. The priority indicates how important it is to address the issue within the milestone. P0 says that the\nmilestone cannot be considered achieved if the issue isn't resolved.\n", - "source_links": [], - "id": 58 - }, - { - "page_link": "https://github.com/jenkinsci/jenkins", - "title": "jenkins readme", - "text": "\n \n \n \n \n\n\n# About\n\n[![Jenkins Regular Release](https://img.shields.io/endpoint?url=https%3A%2F%2Fwww.jenkins.io%2Fchangelog%2Fbadge.json)](https://www.jenkins.io/changelog)\n[![Jenkins LTS Release](https://img.shields.io/endpoint?url=https%3A%2F%2Fwww.jenkins.io%2Fchangelog-stable%2Fbadge.json)](https://www.jenkins.io/changelog-stable)\n[![Docker Pulls](https://img.shields.io/docker/pulls/jenkins/jenkins.svg)](https://hub.docker.com/r/jenkins/jenkins/)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3538/badge)](https://bestpractices.coreinfrastructure.org/projects/3538)\n\nIn a nutshell, Jenkins is the leading open-source automation server.\nBuilt with Java, it provides over 1,800 [plugins](https://plugins.jenkins.io/) to support automating virtually anything,\nso that humans can spend their time doing things machines cannot.\n\n# What to Use Jenkins for and When to Use It\n\nUse Jenkins to automate your development workflow, so you can focus on work that matters most. 
Jenkins is commonly used for:\n\n- Building projects\n- Running tests to detect bugs and other issues as soon as they are introduced\n- Static code analysis\n- Deployment\n\nExecute repetitive tasks, save time, and optimize your development process with Jenkins.\n\n# Downloads\n\nThe Jenkins project provides official distributions as WAR files, Docker images, native packages and installers for platforms including several Linux distributions and Windows.\nSee the [Downloads](https://www.jenkins.io/download) page for references.\n\nFor all distributions Jenkins offers two release lines:\n\n- [Weekly](https://www.jenkins.io/download/weekly/) -\n Frequent releases which include all new features, improvements, and bug fixes.\n- [Long-Term Support (LTS)](https://www.jenkins.io/download/lts/) -\n Older release line which gets periodically updated via bug fix backports.\n\nLatest releases:\n[![Jenkins Regular Release](https://img.shields.io/endpoint?url=https%3A%2F%2Fwww.jenkins.io%2Fchangelog%2Fbadge.json)](https://www.jenkins.io/changelog)\n[![Jenkins LTS Release](https://img.shields.io/endpoint?url=https%3A%2F%2Fwww.jenkins.io%2Fchangelog-stable%2Fbadge.json)](https://www.jenkins.io/changelog-stable)\n\n# Source\n\nOur latest and greatest source of Jenkins can be found on [GitHub](https://github.com/jenkinsci/jenkins). Fork us!\n\n# Contributing to Jenkins\n\nFollow the [contributing guidelines](CONTRIBUTING.md) if you want to propose a change in the Jenkins core.\nFor more information about participating in the community and contributing to the Jenkins project,\nsee [this page](https://www.jenkins.io/participate/).\n\nDocumentation for Jenkins core maintainers is in the [maintainers guidelines](docs/MAINTAINERS.adoc).\n\n# News and Website\n\nAll information about Jenkins can be found on our [website](https://www.jenkins.io/).\nFollow us on [Twitter](https://twitter.com/jenkinsci) or [LinkedIn](https://www.linkedin.com/company/jenkins-project/).\n\n# Governance\n\nSee the [Jenkins Governance Document](https://www.jenkins.io/project/governance/) for information about the project's open governance, our philosophy and values, and development practices.\nJenkins Code of Conduct can be found [here](https://www.jenkins.io/project/conduct/).\n\n# Adopters\n\nJenkins is used by millions of users and thousands of companies.\nSee [adopters](https://www.jenkins.io/project/adopters/) for the list of Jenkins adopters and their success stories.\n\n# License\n\nJenkins is **licensed** under the **[MIT License](https://github.com/jenkinsci/jenkins/blob/master/LICENSE.txt)**.\n", - "source_links": [], - "id": 59 - }, - { - "page_link": "https://github.com/jitsucom/jitsu", - "title": "jitsu readme", - "text": "

\n\n**[Jitsu](https://jitsu.com/?utm_source=gh)** is an open source high-performance data collection service. It can:\n\n* Capture events your application generates and stream to Data Warehouse;\n* Pull data from APIs and save it to Data Warehouse\n\nRead more about [our features](https://jitsu.com/#features) and check out the [platform overview](https://jitsu.com/overview)!\n\n\n## Quick Start\n\nTwo easiest ways to start Jitsu are Heroku deployment and local docker-compose. \n\n### 1-click Heroku deploy\nIt may take up to 5 minutes for Heroku to install environment. \nAfter that you can visit `.herokuapp.com`\n\n\n\n### Docker Compose\nStart Jitsu using docker-compose:\n\n```bash\ngit clone https://github.com/jitsucom/jitsu.git\ncd jitsu\n```\n\nAdd permission for writing log files:\n\n```bash\n#Ubuntu/Mac OS\nchmod -R 777 compose-data/\n```\n\nFor running `latest` version use:\n\n```bash\ndocker-compose up\n```\n\nNote: `latest` image will be downloaded and started.\n\nVisit `http://localhost:8000/configurator` after the build is complete.\n\nTo learn more check out [Jitsu deployment documentation](https://jitsu.com/docs/deployment/):\n\n- [Docker deployment](https://jitsu.com/docs/deployment/deploy-with-docker)\n- [Heroku Deployment](https://jitsu.com/docs/deployment/deploy-on-heroku)\n- [Plural Deployment (On Kubernetes)](https://jitsu.com/docs/deployment/deploy-on-plural) \n- [Building from sources](https://jitsu.com/docs/deployment/build-from-sources)\n\nAlso, we maintain a [Jitsu.Cloud](https://cloud.jitsu.com) \u2014 a hosted version of Jitsu. Jitsu.Cloud [is free](https://jitsu.com/pricing) for up to 250,000 events per month. Each\nproject comes with demo PostgresSQL Database (up 10,000 records).\n\n\n\n## Documentation\n\nPlease see our extensive documentation [here](https://jitsu.com/docs). Key sections include:\n\n* [Deployment](https://jitsu.com/docs/deployment) - Getting Jitsu running on Heroku, Docker, and building from source.\n* [Configuration](https://jitsu.com/docs/configuration) - How to modify Jitsu Server's `yaml` file.\n* [Geo Data](https://jitsu.com/docs/geo-data-resolution) - Configuring data enrichment with [MaxMind](https://www.maxmind.com/en/home).\n* [Scaling](https://jitsu.com/docs/other-features/scaling-eventnative) - How to setup a distributed deployment of Jitsu.\n\n\n\n## Reporting Bugs and Contributing Code\n\n* Want to report a bug or request a feature? Please open [an issue](https://github.com/jitsucom/jitsu/issues/new).\n* Want to help us build **Jitsu**? Fork the project, and check our an issues [that are good for first pull request](https://github.com/jitsucom/jitsu/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+first+issue%22)!\n* Questions? 
Join our [Slack](https://jitsu.com/slack)!\n* [hello@jitsu.com](mailto:hello@jitsu.com) - send us an email if you have any questions!\n", - "source_links": [], - "id": 60 - }, - { - "page_link": "https://github.com/jupyterhub/jupyterhub", - "title": "jupyterhub readme", - "text": "**[Technical Overview](#technical-overview)** |\n**[Installation](#installation)** |\n**[Configuration](#configuration)** |\n**[Docker](#docker)** |\n**[Contributing](#contributing)** |\n**[License](#license)** |\n**[Help and Resources](#help-and-resources)**\n\n---\n\n# [JupyterHub](https://github.com/jupyterhub/jupyterhub)\n\n[![Latest PyPI version](https://img.shields.io/pypi/v/jupyterhub?logo=pypi)](https://pypi.python.org/pypi/jupyterhub)\n[![Latest conda-forge version](https://img.shields.io/conda/vn/conda-forge/jupyterhub?logo=conda-forge)](https://anaconda.org/conda-forge/jupyterhub)\n[![Documentation build status](https://img.shields.io/readthedocs/jupyterhub?logo=read-the-docs)](https://jupyterhub.readthedocs.org/en/latest/)\n[![GitHub Workflow Status - Test](https://img.shields.io/github/workflow/status/jupyterhub/jupyterhub/Test?logo=github&label=tests)](https://github.com/jupyterhub/jupyterhub/actions)\n[![DockerHub build status](https://img.shields.io/docker/build/jupyterhub/jupyterhub?logo=docker&label=build)](https://hub.docker.com/r/jupyterhub/jupyterhub/tags)\n[![Test coverage of code](https://codecov.io/gh/jupyterhub/jupyterhub/branch/main/graph/badge.svg)](https://codecov.io/gh/jupyterhub/jupyterhub)\n[![GitHub](https://img.shields.io/badge/issue_tracking-github-blue?logo=github)](https://github.com/jupyterhub/jupyterhub/issues)\n[![Discourse](https://img.shields.io/badge/help_forum-discourse-blue?logo=discourse)](https://discourse.jupyter.org/c/jupyterhub)\n[![Gitter](https://img.shields.io/badge/social_chat-gitter-blue?logo=gitter)](https://gitter.im/jupyterhub/jupyterhub)\n\nWith [JupyterHub](https://jupyterhub.readthedocs.io) you can create a\n**multi-user Hub** that spawns, manages, and proxies multiple instances of the\nsingle-user [Jupyter notebook](https://jupyter-notebook.readthedocs.io)\nserver.\n\n[Project Jupyter](https://jupyter.org) created JupyterHub to support many\nusers. 
The Hub can offer notebook servers to a class of students, a corporate\ndata science workgroup, a scientific research project, or a high-performance\ncomputing group.\n\n## Technical overview\n\nThree main actors make up JupyterHub:\n\n- multi-user **Hub** (tornado process)\n- configurable http **proxy** (node-http-proxy)\n- multiple **single-user Jupyter notebook servers** (Python/Jupyter/tornado)\n\nBasic principles for operation are:\n\n- Hub launches a proxy.\n- The Proxy forwards all requests to Hub by default.\n- Hub handles login and spawns single-user servers on demand.\n- Hub configures proxy to forward URL prefixes to the single-user notebook\n servers.\n\nJupyterHub also provides a\n[REST API][]\nfor administration of the Hub and its users.\n\n[rest api]: https://jupyterhub.readthedocs.io/en/latest/reference/rest-api.html\n\n## Installation\n\n### Check prerequisites\n\n- A Linux/Unix based system\n- [Python](https://www.python.org/downloads/) 3.6 or greater\n- [nodejs/npm](https://www.npmjs.com/)\n\n - If you are using **`conda`**, the nodejs and npm dependencies will be installed for\n you by conda.\n\n - If you are using **`pip`**, install a recent version (at least 12.0) of\n [nodejs/npm](https://docs.npmjs.com/getting-started/installing-node).\n\n- If using the default PAM Authenticator, a [pluggable authentication module (PAM)](https://en.wikipedia.org/wiki/Pluggable_authentication_module).\n- TLS certificate and key for HTTPS communication\n- Domain name\n\n### Install packages\n\n#### Using `conda`\n\nTo install JupyterHub along with its dependencies including nodejs/npm:\n\n```bash\nconda install -c conda-forge jupyterhub\n```\n\nIf you plan to run notebook servers locally, install JupyterLab or Jupyter notebook:\n\n```bash\nconda install jupyterlab\nconda install notebook\n```\n\n#### Using `pip`\n\nJupyterHub can be installed with `pip`, and the proxy with `npm`:\n\n```bash\nnpm install -g configurable-http-proxy\npython3 -m pip install jupyterhub\n```\n\nIf you plan to run notebook servers locally, you will need to install\n[JupyterLab or Jupyter notebook](https://jupyter.readthedocs.io/en/latest/install.html):\n\n python3 -m pip install --upgrade jupyterlab\n python3 -m pip install --upgrade notebook\n\n### Run the Hub server\n\nTo start the Hub server, run the command:\n\n jupyterhub\n\nVisit `http://localhost:8000` in your browser, and sign in with your system username and password.\n\n_Note_: To allow multiple users to sign in to the server, you will need to\nrun the `jupyterhub` command as a _privileged user_, such as root.\nThe [wiki](https://github.com/jupyterhub/jupyterhub/wiki/Using-sudo-to-run-JupyterHub-without-root-privileges)\ndescribes how to run the server as a _less privileged user_, which requires\nmore configuration of the system.\n\n## Configuration\n\nThe [Getting Started](https://jupyterhub.readthedocs.io/en/latest/tutorial/index.html#getting-started) section of the\ndocumentation explains the common steps in setting up JupyterHub.\n\nThe [**JupyterHub tutorial**](https://github.com/jupyterhub/jupyterhub-tutorial)\nprovides an in-depth video and sample configurations of JupyterHub.\n\n### Create a configuration file\n\nTo generate a default config file with settings and descriptions:\n\n jupyterhub --generate-config\n\n### Start the Hub\n\nTo start the Hub on a specific url and port `10.0.1.2:443` with **https**:\n\n jupyterhub --ip 10.0.1.2 --port 443 --ssl-key my_ssl.key --ssl-cert my_ssl.cert\n\n### Authenticators\n\n| Authenticator | 
Description |\n| ---------------------------------------------------------------------------- | ------------------------------------------------- |\n| PAMAuthenticator | Default, built-in authenticator |\n| [OAuthenticator](https://github.com/jupyterhub/oauthenticator) | OAuth + JupyterHub Authenticator = OAuthenticator |\n| [ldapauthenticator](https://github.com/jupyterhub/ldapauthenticator) | Simple LDAP Authenticator Plugin for JupyterHub |\n| [kerberosauthenticator](https://github.com/jupyterhub/kerberosauthenticator) | Kerberos Authenticator Plugin for JupyterHub |\n\n### Spawners\n\n| Spawner | Description |\n| -------------------------------------------------------------- | -------------------------------------------------------------------------- |\n| LocalProcessSpawner | Default, built-in spawner starts single-user servers as local processes |\n| [dockerspawner](https://github.com/jupyterhub/dockerspawner) | Spawn single-user servers in Docker containers |\n| [kubespawner](https://github.com/jupyterhub/kubespawner) | Kubernetes spawner for JupyterHub |\n| [sudospawner](https://github.com/jupyterhub/sudospawner) | Spawn single-user servers without being root |\n| [systemdspawner](https://github.com/jupyterhub/systemdspawner) | Spawn single-user notebook servers using systemd |\n| [batchspawner](https://github.com/jupyterhub/batchspawner) | Designed for clusters using batch scheduling software |\n| [yarnspawner](https://github.com/jupyterhub/yarnspawner) | Spawn single-user notebook servers distributed on a Hadoop cluster |\n| [wrapspawner](https://github.com/jupyterhub/wrapspawner) | WrapSpawner and ProfilesSpawner enabling runtime configuration of spawners |\n\n## Docker\n\nA starter [**docker image for JupyterHub**](https://hub.docker.com/r/jupyterhub/jupyterhub/)\ngives a baseline deployment of JupyterHub using Docker.\n\n**Important:** This `jupyterhub/jupyterhub` image contains only the Hub itself,\nwith no configuration. In general, one needs to make a derivative image, with\nat least a `jupyterhub_config.py` setting up an Authenticator and/or a Spawner.\nTo run the single-user servers, which may be on the same system as the Hub or\nnot, Jupyter Notebook version 4 or greater must be installed.\n\nThe JupyterHub docker image can be started with the following command:\n\n docker run -p 8000:8000 -d --name jupyterhub jupyterhub/jupyterhub jupyterhub\n\nThis command will create a container named `jupyterhub` that you can\n**stop and resume** with `docker stop/start`.\n\nThe Hub service will be listening on all interfaces at port 8000, which makes\nthis a good choice for **testing JupyterHub on your desktop or laptop**.\n\nIf you want to run docker on a computer that has a public IP then you should\n(as in MUST) **secure it with ssl** by adding ssl options to your docker\nconfiguration or by using an ssl enabled proxy.\n\n[Mounting volumes](https://docs.docker.com/engine/admin/volumes/volumes/) will\nallow you to **store data outside the docker image (host system) so it will be persistent**, even when you start\na new image.\n\nThe command `docker exec -it jupyterhub bash` will spawn a root shell in your docker\ncontainer. 
You can **use the root shell to create system users in the container**.\nThese accounts will be used for authentication in JupyterHub's default configuration.\n\n## Contributing\n\nIf you would like to contribute to the project, please read our\n[contributor documentation](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html)\nand the [`CONTRIBUTING.md`](CONTRIBUTING.md). The `CONTRIBUTING.md` file\nexplains how to set up a development installation, how to run the test suite,\nand how to contribute to documentation.\n\nFor a high-level view of the vision and next directions of the project, see the\n[JupyterHub community roadmap](docs/source/contributing/roadmap.md).\n\n### A note about platform support\n\nJupyterHub is supported on Linux/Unix based systems.\n\nJupyterHub officially **does not** support Windows. You may be able to use\nJupyterHub on Windows if you use a Spawner and Authenticator that work on\nWindows, but the JupyterHub defaults will not. Bugs reported on Windows will not\nbe accepted, and the test suite will not run on Windows. Small patches that fix\nminor Windows compatibility issues (such as basic installation) **may** be accepted,\nhowever. For Windows-based systems, we would recommend running JupyterHub in a\ndocker container or Linux VM.\n\n[Additional Reference:](http://www.tornadoweb.org/en/stable/#installation) Tornado's documentation on Windows platform support\n\n## License\n\nWe use a shared copyright model that enables all contributors to maintain the\ncopyright on their contributions.\n\nAll code is licensed under the terms of the [revised BSD license](./COPYING.md).\n\n## Help and resources\n\nWe encourage you to ask questions and share ideas on the [Jupyter community forum](https://discourse.jupyter.org/).\nYou can also talk with us on our JupyterHub [Gitter](https://gitter.im/jupyterhub/jupyterhub) channel.\n\n- [Reporting Issues](https://github.com/jupyterhub/jupyterhub/issues)\n- [JupyterHub tutorial](https://github.com/jupyterhub/jupyterhub-tutorial)\n- [Documentation for JupyterHub](https://jupyterhub.readthedocs.io/en/latest/)\n- [Documentation for JupyterHub's REST API][rest api]\n- [Documentation for Project Jupyter](http://jupyter.readthedocs.io/en/latest/index.html)\n- [Project Jupyter website](https://jupyter.org)\n- [Project Jupyter community](https://jupyter.org/community)\n\nJupyterHub follows the Jupyter [Community Guides](https://jupyter.readthedocs.io/en/latest/community/content-community.html).\n\n---\n\n**[Technical Overview](#technical-overview)** |\n**[Installation](#installation)** |\n**[Configuration](#configuration)** |\n**[Docker](#docker)** |\n**[Contributing](#contributing)** |\n**[License](#license)** |\n**[Help and Resources](#help-and-resources)**\n", - "source_links": [], - "id": 61 - }, - { - "page_link": "network-policies.md", - "title": "network-policies", - "text": "# Network Policy\n\nJupyterhub makes extensive use of kubernetes network policies. This allows you to finely scope a jupyter notebook's access to resources available on the network, which can be very important in a multi-tenant kubernetes cluster. 
That said, you might want to expose some services to your network either on-cluster or in an adjacent network, and here are some recipes to do that.\n\nIn all cases, the following yaml will be added to `jupyterhub/helm/jupyterhub/values.yaml` or you can modify directly in the console at `/apps/jupyterhub/config`\n\n## Get access to an adjacent namespace\n\n```yaml\njupyterhub:\n jupyterhub:\n singleuser:\n networkPolicy:\n egress:\n - to:\n - namespaceSelector:\n matchLabels:\n kubernetes.io/metadata.name: \n```\n\n## Get access to a CIDR range\n\n\n```yaml\njupyterhub:\n jupyterhub:\n singleuser:\n networkPolicy:\n egress:\n - to:\n - ipBlock:\n cidr: 10.0.0.0/16 # access resources on an internal subnetwork for example\n```\n\n## Combine multiple policies\n\n\n```yaml\njupyterhub:\n jupyterhub:\n singleuser:\n networkPolicy:\n egress:\n - to:\n - namespaceSelector:\n matchLabels:\n kubernetes.io/metadata.name: \n - to:\n - ipBlock:\n cidr: 10.0.0.0/16\n```", - "source_links": [], - "id": 62 - }, - { - "page_link": "https://github.com/apache/kafka", - "title": "kafka readme", - "text": null, - "source_links": [], - "id": 63 - }, - { - "page_link": "https://github.com/knative/serving", - "title": "knative readme", - "text": "# Knative Serving\n\n[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white)](https://pkg.go.dev/github.com/knative/serving)\n[![Go Report Card](https://goreportcard.com/badge/knative/serving)](https://goreportcard.com/report/knative/serving)\n[![Releases](https://img.shields.io/github/release-pre/knative/serving.svg?sort=semver)](https://github.com/knative/serving/releases)\n[![LICENSE](https://img.shields.io/github/license/knative/serving.svg)](https://github.com/knative/serving/blob/main/LICENSE)\n[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://knative.slack.com)\n[![codecov](https://codecov.io/gh/knative/serving/branch/main/graph/badge.svg)](https://codecov.io/gh/knative/serving)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/5913/badge)](https://bestpractices.coreinfrastructure.org/projects/5913)\n\nKnative Serving builds on Kubernetes to support deploying and serving of\napplications and functions as serverless containers. Serving is easy to get\nstarted with and scales to support advanced scenarios.\n\nThe Knative Serving project provides middleware primitives that enable:\n\n- Rapid deployment of serverless containers\n- Automatic scaling up and down to zero\n- Routing and network programming\n- Point-in-time snapshots of deployed code and configurations\n\nFor documentation on using Knative Serving, see the\n[serving section](https://www.knative.dev/docs/serving/) of the\n[Knative documentation site](https://www.knative.dev/docs).\n\nFor documentation on the Knative Serving specification, see the\n[docs](https://github.com/knative/serving/tree/main/docs) folder of this\nrepository.\n\nIf you are interested in contributing, see [CONTRIBUTING.md](./CONTRIBUTING.md)\nand [DEVELOPMENT.md](./DEVELOPMENT.md).\n", - "source_links": [], - "id": 64 - }, - { - "page_link": "https://github.com/ory/kratos", - "title": "kratos readme", - "text": "

\"Ory

\n\n

\n Chat |\n Discussions |\n Newsletter

\n Guide |\n API Docs |\n Code Docs

\n Support this project!

\n Work in Open Source, Ory is hiring!\n

\n\n---\n\n

\n \"CI\n \n \"CII\n \n \n \n\n\nOry Kratos is the first cloud native Identity and User Management System in the\nworld. Finally, it is no longer necessary to implement a User Login process for\nthe umpteenth time!\n\n## Ory Kratos in Ory Cloud\n\nThe easiest way to get started with Ory Software is in Ory Cloud! Ory Cloud is\n[**free forever for developers**](https://console.ory.sh/registration?utm_source=github&utm_medium=banner&utm_campaign=kratos-readme),\nno credit card required.\n\nInstall the [Ory CLI](https://www.ory.sh/docs/guides/cli/installation) and\ncreate a new project to get started with Ory Kratos right away:\n\n```\n# If you don't have Ory CLI installed yet:\nbash <(curl https://raw.githubusercontent.com/ory/meta/master/install.sh) -b . ory\nsudo mv ./ory /usr/local/bin/\n\n# Sign up\nory auth\n\n# Create project\nory create project\n```\n\nOry Cloud ships administrative user interfaces, hosted pages (e.g. for login or\nregistration), support for custom domains, collaborative features for your\ncolleagues, integration services, and much more!\n\n\n\n\n**Table of Contents**\n\n- [What is Ory Kratos?](#what-is-ory-kratos)\n - [Who is using it?](#who-is-using-it)\n- [Getting Started](#getting-started)\n - [Installation](#installation)\n- [Ecosystem](#ecosystem)\n - [Ory Kratos: Identity and User Infrastructure and Management](#ory-kratos-identity-and-user-infrastructure-and-management)\n - [Ory Hydra: OAuth2 & OpenID Connect Server](#ory-hydra-oauth2--openid-connect-server)\n - [Ory Oathkeeper: Identity & Access Proxy](#ory-oathkeeper-identity--access-proxy)\n - [Ory Keto: Access Control Policies as a Server](#ory-keto-access-control-policies-as-a-server)\n- [Security](#security)\n - [Disclosing vulnerabilities](#disclosing-vulnerabilities)\n- [Telemetry](#telemetry)\n- [Documentation](#documentation)\n - [Guide](#guide)\n - [HTTP API documentation](#http-api-documentation)\n - [Upgrading and Changelog](#upgrading-and-changelog)\n - [Command line documentation](#command-line-documentation)\n - [Develop](#develop)\n - [Dependencies](#dependencies)\n - [Install from source](#install-from-source)\n - [Formatting Code](#formatting-code)\n - [Running Tests](#running-tests)\n - [Short Tests](#short-tests)\n - [Regular Tests](#regular-tests)\n - [Updating Test Fixtures](#updating-test-fixtures)\n - [End-to-End Tests](#end-to-end-tests)\n - [Build Docker](#build-docker)\n - [Documentation Tests](#documentation-tests)\n - [Preview API documentation](#preview-api-documentation)\n\n\n\n## What is Ory Kratos?\n\nOry Kratos is an API-first Identity and User Management system that is built\naccording to\n[cloud architecture best practices](https://www.ory.sh/docs/ecosystem/software-architecture-philosophy).\nIt implements core use cases that almost every software application needs to\ndeal with:\n\n- **Self-service Login and Registration**: Allow end-users to create and sign\n into accounts (we call them **identities**) using Username / Email and\n password combinations, Social Sign In (\"Sign in with Google, GitHub\"),\n Passwordless flows, and others.\n- **Multi-Factor Authentication (MFA/2FA)**: Support protocols such as TOTP\n ([RFC 6238](https://tools.ietf.org/html/rfc6238) and\n [IETF RFC 4226](https://tools.ietf.org/html/rfc4226) - better known as\n [Google Authenticator](https://en.wikipedia.org/wiki/Google_Authenticator))\n- **Account Verification**: Verify that an E-Mail address, phone number, or\n physical address actually belong to that identity.\n- **Account Recovery**: Recover 
access using \"Forgot Password\" flows, Security\n Codes (in case of MFA device loss), and others.\n- **Profile and Account Management**: Update passwords, personal details, email\n addresses, linked social profiles using secure flows.\n- **Admin APIs**: Import, update, delete identities.\n\nWe highly recommend reading the\n[Ory Kratos introduction docs](https://www.ory.sh/kratos/docs/) to learn more\nabout Ory Krato's background, feature set, and differentiation from other\nproducts.\n\n### Who is using it?\n\n\n\nThe Ory community stands on the shoulders of individuals, companies, and\nmaintainers. We thank everyone involved - from submitting bug reports and\nfeature requests, to contributing patches, to sponsoring our work. Our community\nis 1000+ strong and growing rapidly. The Ory stack protects 16.000.000.000+ API\nrequests every month with over 250.000+ active service nodes. We would have\nnever been able to achieve this without each and everyone of you!\n\nThe following list represents companies that have accompanied us along the way\nand that have made outstanding contributions to our ecosystem. _If you think\nthat your company deserves a spot here, reach out to\noffice-muc@ory.sh now_!\n\n**Please consider giving back by becoming a sponsor of our open source work on\nPatreon or\nOpen Collective.**\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
| Type | Name | Website |
|------|------|---------|
| Sponsor | Raspberry PI Foundation | raspberrypi.org |
| Contributor | Kyma Project | kyma-project.io |
| Sponsor | Tulip | tulip.com |
| Sponsor | Cashdeck / All My Funds | cashdeck.com.au |
| Contributor | Hootsuite | hootsuite.com |
| Adopter * | Segment | segment.com |
| Adopter * | Arduino | arduino.cc |
| Adopter * | DataDetect | unifiedglobalarchiving.com/data-detect/ |
| Adopter * | Sainsbury's | sainsburys.co.uk |
| Adopter * | Contraste | contraste.com |
| Adopter * | Reyah | reyah.eu |
| Adopter * | Zero | getzero.dev |
| Adopter * | Padis | padis.io |
| Adopter * | Cloudbear | cloudbear.eu |
| Adopter * | Security Onion Solutions | securityonionsolutions.com |
| Adopter * | Factly | factlylabs.com |
| Adopter * | Nortal | nortal.com |
| Sponsor | OrderMyGear | ordermygear.com |
| Sponsor | Spiri.bo | spiri.bo |
| Sponsor | Strivacity | strivacity.com |
| Adopter * | Hanko | hanko.io |
| Adopter * | Rabbit | rabbit.co.th |
| Adopter * | inMusic | inmusicbrands.com |
| Adopter * | Buhta | buhta.com |
| Adopter * | Connctd | connctd.com |
| Adopter * | Paralus | paralus.io |
| Adopter * | TIER IV | tier4.jp |
| Adopter * | R2Devops | r2devops.io |
| Adopter * | LunaSec | lunasec.io |
\n\nWe also want to thank all individual contributors\n\n\n\nas well as all of our backers\n\n\n\nand past & current supporters (in alphabetical order) on\n[Patreon](https://www.patreon.com/_ory): Alexander Alimovs, Billy, Chancy\nKennedy, Drozzy, Edwin Trejos, Howard Edidin, Ken Adler Oz Haven, Stefan Hans,\nTheCrealm.\n\n\\* Uses one of Ory's major projects in production.\n\n\n\n## Getting Started\n\nTo get started with some easy examples, head over to the\n[Get Started Documentation](https://www.ory.sh/docs/guides/protect-page-login/).\n\n### Installation\n\nHead over to the\n[Ory Developer Documentation](https://www.ory.sh/kratos/docs/install) to learn\nhow to install Ory Kratos on Linux, macOS, Windows, and Docker and how to build\nOry Kratos from source.\n\n## Ecosystem\n\n\n\nWe build Ory on several guiding principles when it comes to our architecture\ndesign:\n\n- Minimal dependencies\n- Runs everywhere\n- Scales without effort\n- Minimize room for human and network errors\n\nOry's architecture is designed to run best on a Container Orchestration system\nsuch as Kubernetes, CloudFoundry, OpenShift, and similar projects. Binaries are\nsmall (5-15MB) and available for all popular processor types (ARM, AMD64, i386)\nand operating systems (FreeBSD, Linux, macOS, Windows) without system\ndependencies (Java, Node, Ruby, libxml, ...).\n\n### Ory Kratos: Identity and User Infrastructure and Management\n\n[Ory Kratos](https://github.com/ory/kratos) is an API-first Identity and User\nManagement system that is built according to\n[cloud architecture best practices](https://www.ory.sh/docs/next/ecosystem/software-architecture-philosophy).\nIt implements core use cases that almost every software application needs to\ndeal with: Self-service Login and Registration, Multi-Factor Authentication\n(MFA/2FA), Account Recovery and Verification, Profile, and Account Management.\n\n### Ory Hydra: OAuth2 & OpenID Connect Server\n\n[Ory Hydra](https://github.com/ory/hydra) is an OpenID Certified\u2122 OAuth2 and\nOpenID Connect Provider which easily connects to any existing identity system by\nwriting a tiny \"bridge\" application. Gives absolute control over user interface\nand user experience flows.\n\n### Ory Oathkeeper: Identity & Access Proxy\n\n[Ory Oathkeeper](https://github.com/ory/oathkeeper) is a BeyondCorp/Zero Trust\nIdentity & Access Proxy (IAP) with configurable authentication, authorization,\nand request mutation rules for your web services: Authenticate JWT, Access\nTokens, API Keys, mTLS; Check if the contained subject is allowed to perform the\nrequest; Encode resulting content into custom headers (`X-User-ID`), JSON Web\nTokens and more!\n\n### Ory Keto: Access Control Policies as a Server\n\n[Ory Keto](https://github.com/ory/keto) is a policy decision point. It uses a\nset of access control policies, similar to AWS IAM Policies, in order to\ndetermine whether a subject (user, application, service, car, ...) is authorized\nto perform a certain action on a resource.\n\n\n\n## Security\n\nRunning identity infrastructure requires\n[attention and knowledge of threat models](https://www.ory.sh/kratos/docs/concepts/security).\n\n### Disclosing vulnerabilities\n\nIf you think you found a security vulnerability, please refrain from posting it\npublicly on the forums, the chat, or GitHub and send us an email to\n[hi@ory.am](mailto:hi@ory.sh) instead.\n\n## Telemetry\n\nOry's services collect summarized, anonymized data that can optionally be turned\noff. 
Click [here](https://www.ory.sh/docs/ecosystem/sqa) to learn more.\n\n## Documentation\n\n### Guide\n\nThe Guide is available [here](https://www.ory.sh/kratos/docs).\n\n### HTTP API documentation\n\nThe HTTP API is documented [here](https://www.ory.sh/kratos/docs/sdk/api).\n\n### Upgrading and Changelog\n\nNew releases might introduce breaking changes. To help you identify and\nincorporate those changes, we document these changes in the\n[CHANGELOG.md](./CHANGELOG.md). For upgrading, please visit the\n[upgrade guide](https://www.ory.sh/kratos/docs/guides/upgrade).\n\n### Command line documentation\n\nRun kratos -h or\nkratos help.\n\n### Develop\n\nWe encourage all contributions and encourage you to read our\n[contribution guidelines](./CONTRIBUTING.md)\n\n#### Dependencies\n\nYou need Go 1.16+ and (for the test suites):\n\n- Docker and Docker Compose\n- Makefile\n- NodeJS / npm\n\nIt is possible to develop Ory Kratos on Windows, but please be aware that all\nguides assume a Unix shell like bash or zsh.\n\n#### Install from source\n\n

\n```\nmake install\n```\n
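\nAs a quick sanity check (a suggested step, not part of the upstream guide): assuming `make install` places the `kratos` binary on your `PATH` (typically via `$GOBIN` or `$GOPATH/bin`), you can confirm the build with the CLI help command referenced in the command line documentation section above:\n\n```\n# assumes $GOBIN (or $GOPATH/bin) is on your PATH\nkratos help\n```\n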
\n\n#### Formatting Code\n\nYou can format all code using make format. Our\nCI checks if your code is properly formatted.\n\n#### Running Tests\n\nThere are three types of tests you can run:\n\n- Short tests (do not require a SQL database like PostgreSQL)\n- Regular tests (do require PostgreSQL, MySQL, CockroachDB)\n- End to end tests (do require databases and will use a test browser)\n\n##### Short Tests\n\nShort tests run fairly quickly. You can either test all of the code at once\n\n```shell script\ngo test -short -tags sqlite ./...\n```\n\nor test just a specific module:\n\n```shell script\ncd client; go test -tags sqlite -short .\n```\n\n##### Regular Tests\n\nRegular tests require a database set up. Our test suite is able to work with\ndocker directly (using [ory/dockertest](https://github.com/ory/dockertest)) but\nwe encourage to use the Makefile instead. Using dockertest can bloat the number\nof Docker Images on your system and are quite slow. Instead we recommend doing:\n\n
\n```\nmake test\n```\n
\n\nPlease be aware that make test recreates the\ndatabases every time you run make test. This\ncan be annoying if you are trying to fix something very specific and need the\ndatabase tests all the time. In that case we suggest that you initialize the\ndatabases with:\n\n\n\n```shell script\nmake test-resetdb\nexport TEST_DATABASE_MYSQL='mysql://root:secret@(127.0.0.1:3444)/mysql?parseTime=true'\nexport TEST_DATABASE_POSTGRESQL='postgres://postgres:secret@127.0.0.1:3445/kratos?sslmode=disable'\nexport TEST_DATABASE_COCKROACHDB='cockroach://root@127.0.0.1:3446/defaultdb?sslmode=disable'\n```\n\n\n\nThen you can run `go test` as often as you'd like:\n\n```shell script\ngo test -tags sqlite ./...\n\n# or in a module:\ncd client; go test -tags sqlite .\n```\n\n##### Updating Test Fixtures\n\nSome tests use fixtures. If payloads change, you can update them with:\n\n```\nmake test-update-snapshots\n```\n\nThis will only update the snapshots of the short tests. To update all snapshots,\nrun:\n\n```bash\nUPDATE_SNAPSHOTS=true go test -p 4 -tags sqlite ./...\n```\n\nYou can also run this command from a sub folder.\n\n##### End-to-End Tests\n\nWe use [Cypress](https://www.cypress.io) to run our e2e tests.\n\nThe simplest way to develop e2e tests is:\n\n
\n```\n./test/e2e/run.sh --dev sqlite\n```\n
\n\nYou can run all tests (with databases) using:\n\n
\n```\nmake test-e2e\n```\n
\n\nFor more details, run:\n\n
\n```\n./test/e2e/run.sh\n```\n
\n\n**Run only a singular test**\n\nAdd `.only` to the test you would like to run.\n\nFor example:\n\n```ts\nit.only('invalid remote recovery email template', () => {\n ...\n})\n```\n\n**Run a subset of tests**\n\nThis will require editing the `cypress.json` file located in the `test/e2e/`\nfolder.\n\nAdd the `testFiles` option and specify the test to run inside the\n`cypress/integration` folder. As an example we will add only the `network`\ntests.\n\n```json\n\"testFiles\": [\"profiles/network/*\"],\n```\n\nNow start the tests again using the run script or makefile.\n\n#### Build Docker\n\nYou can build a development Docker Image using:\n\n
\n```\nmake docker\n```\n
\n\n#### Documentation Tests\n\nTo prepare documentation tests, run `npm i` to install\n[Text-Runner](https://github.com/kevgo/text-runner).\n\n- test all documentation: make test-docs\n- test an individual file: text-run\n\n#### Preview API documentation\n\n- update the SDK including the OpenAPI specification:\n make sdk\n- run preview server for API documentation: make\n docs/api\n- run preview server for swagger documentation: make\n docs/swagger\n", - "source_links": [], - "id": 65 - }, - { - "page_link": null, - "title": "kserve readme", - "text": null, - "source_links": [], - "id": 66 - }, - { - "page_link": "https://github.com/kubecost/cost-model", - "title": "kubecost readme", - "text": "## Kubecost\n\nKubecost models give teams visibility into current and historical Kubernetes spend and resource allocation. These models provide cost transparency in Kubernetes environments that support multiple applications, teams, departments, etc.\n\n![Kubecost allocation UI](/allocation-drilldown.gif)\n\nTo see more on the functionality of the full Kubecost product, please visit the [features page](https://kubecost.com/#features) on our website. \nHere is a summary of features enabled by this cost model:\n\n- Real-time cost allocation by Kubernetes service, deployment, namespace, label, statefulset, daemonset, pod, and container\n- Dynamic asset pricing enabled by integrations with AWS, Azure, and GCP billing APIs \n- Supports on-prem k8s clusters with custom pricing sheets\n- Allocation for in-cluster resources like CPU, GPU, memory, and persistent volumes.\n- Allocation for AWS & GCP out-of-cluster resources like RDS instances and S3 buckets with key (optional)\n- Easily export pricing data to Prometheus with /metrics endpoint ([learn more](PROMETHEUS.md))\n- Free and open source distribution (Apache2 license)\n\n## Requirements\n\n- Kubernetes version 1.8 or higher\n- Prometheus\n- kube-state-metrics (optional) \n\n## Getting Started\n\nYou can deploy Kubecost on any Kubernetes 1.8+ cluster in a matter of minutes, if not seconds. \nVisit the Kubecost docs for [recommended install options](https://docs.kubecost.com/install). Compared to building from source, installing from Helm is faster and includes all necessary dependencies. \n\n## Usage\n\n* User interface\n* [Cost APIs](https://github.com/kubecost/docs/blob/master/apis.md)\n* [CLI / kubectl cost](https://github.com/kubecost/kubectl-cost)\n* [Prometheus metric exporter](kubecost-exporter.md)\n\n## Contributing\n\nWe :heart: pull requests! See [`CONTRIBUTING.md`](CONTRIBUTING.md) for information on buiding the project from source\nand contributing changes. \n\n## Licensing\n\nLicensed under the Apache License, Version 2.0 (the \"License\")\n\n ## Software stack\n\nGolang application. \nPrometheus. \nKubernetes. \n\n## Frequently Asked Questions\n\n#### How do you measure the cost of CPU/RAM/GPU/storage for a container, pod, deployment, etc.\n\nThe Kubecost model collects pricing data from major cloud providers, e.g. GCP, Azure and AWS, to provide the real-time cost of running workloads. Based on data from these APIs, each container/pod inherits a cost per CPU-hour, GPU-hour, Storage Gb-hour and cost per RAM Gb-hour based on the node where it was running or the class of storage provisioned. This means containers of the same size, as measured by the max of requests or usage, could be charged different resource rates if they are scheduled in seperate regions, on nodes with different usage types (on-demand vs preemptible), etc. 
\n\nFor on-prem clusters, these resource prices can be configured directly with custom pricing sheets (more below).\n\nMeasuring the CPU/RAM/GPU cost of a deployment, service, namespace, etc is the aggregation of its individual container costs.\n\n#### How do you determine RAM/CPU costs for a node when this data isn\u2019t provided by a cloud provider?\n\nWhen explicit RAM or CPU prices are not provided by your cloud provider, the Kubecost model falls back to the ratio of base CPU and RAM price inputs supplied. The default values for these parameters are based on the marginal resource rates of the cloud provider, but they can be customized within Kubecost.\n\nThese base RAM/CPU prices are normalized to ensure the sum of each component is equal to the total price of the node provisioned, based on billing rates from your provider. When the sum of RAM/CPU costs is greater (or less) than the price of the node, then the ratio between the two input prices are held constant. \n\nAs an example, let's imagine a node with 1 CPU and 1 Gb of RAM that costs $20/mo. If your base CPU price is $30 and your RAM Gb price is $10, then these inputs will be normlized to $15 for CPU and $5 for RAM so that the sum equals the cost of the node. Note that the price of a CPU remains 3x the price of a Gb of RAM. \n\n NodeHourlyCost = NORMALIZED_CPU_PRICE * # of CPUS + NORMALIZED_RAM_PRICE * # of RAM Gb\n\n#### How do you allocate a specific amount of RAM/CPU to an individual pod or container?\n\nResources are allocated based on the time-weighted maximum of resource Requests and Usage over the measured period. For example, a pod with no usage and 1 CPU requested for 12 hours out of a 24 hour window would be allocated 12 CPU hours. For pods with BestEffort quality of service (i.e. no requests) allocation is done solely on resource usage. \n\n#### How do I set my AWS Spot estimates for cost allocation?\n\nModify [spotCPU](https://github.com/kubecost/cost-model/blob/master/configs/default.json#L5) and [spotRAM](https://github.com/kubecost/cost-model/blob/master/configs/default.json#L7) in default.json to the level of recent market prices. Allocation will use these prices, but it does not take into account what you are actually charged by AWS. Alternatively, you can provide an AWS key to allow access to the Spot data feed. This will provide accurate Spot price reconciliation. \n\n#### Do I need a GCP billing API key?\n\nWe supply a global key with a low limit for evaluation, but you will want to supply your own before moving to production. \n \nPlease reach out with any additional questions on [Slack](https://join.slack.com/t/kubecost/shared_invite/enQtNTA2MjQ1NDUyODE5LWFjYzIzNWE4MDkzMmUyZGU4NjkwMzMyMjIyM2E0NGNmYjExZjBiNjk1YzY5ZDI0ZTNhZDg4NjlkMGRkYzFlZTU) or via email at [team@kubecost.com](team@kubecost.com). \n", - "source_links": [], - "id": 67 - }, - { - "page_link": "https://github.com/kubeflow/kubeflow", - "title": "kubeflow readme", - "text": "\nKubeflow the cloud-native platform for machine learning operations - pipelines, training and deployment.\n\n---\n\n## Documentation\nPlease refer to the official docs at [kubeflow.org](http://kubeflow.org).\n\n## Working Groups\nThe Kubeflow community is organized into working groups (WGs) with associated repositories, that focus on specific pieces of the ML platform. 
\n\n* [AutoML](https://github.com/kubeflow/community/tree/master/wg-automl)\n* [Deployment](https://github.com/kubeflow/community/tree/master/wg-deployment)\n* [Manifests](https://github.com/kubeflow/community/tree/master/wg-manifests)\n* [Notebooks](https://github.com/kubeflow/community/tree/master/wg-notebooks)\n* [Pipelines](https://github.com/kubeflow/community/tree/master/wg-pipelines)\n* [Serving](https://github.com/kubeflow/community/tree/master/wg-serving)\n* [Training](https://github.com/kubeflow/community/tree/master/wg-training)\n\n## Quick Links\n* [Prow jobs dashboard](http://prow.kubeflow-testing.com)\n* [PR Dashboard](https://k8s-gubernator.appspot.com/pr)\n* [Argo UI for E2E tests](https://argo.kubeflow-testing.com)\n\n## Get Involved\nPlease refer to the [Community](https://www.kubeflow.org/docs/about/community/) page.\n\n", - "source_links": [], - "id": 68 - }, - { - "page_link": "https://github.com/armosec/kubescape", - "title": "kubescape readme", - "text": "
\n\n---\n\n[![build](https://github.com/kubescape/kubescape/actions/workflows/build.yaml/badge.svg)](https://github.com/kubescape/kubescape/actions/workflows/build.yaml)\n[![Go Report Card](https://goreportcard.com/badge/github.com/kubescape/kubescape)](https://goreportcard.com/report/github.com/kubescape/kubescape)\n[![Gitpod Ready-to-Code](https://img.shields.io/badge/Gitpod-Ready--to--Code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/kubescape/kubescape)\n\n:sunglasses: [Want to contribute?](#being-a-part-of-the-team) :innocent: \n\n\nKubescape is a K8s open-source tool providing a Kubernetes single pane of glass, including risk analysis, security compliance, RBAC visualizer, and image vulnerability scanning. \nKubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK\u00ae](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.\n\nIt has become one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins precious time, effort, and resources.\nKubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.\n\n
\n\n# Kubescape CLI:\n\n\n
\n\n# TL;DR\n## Install:\n```sh\ncurl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh | /bin/bash\n```\n\n*OR:*\n\n[Install on windows](#install-on-windows)\n\n[Install on macOS](#install-on-macos)\n\n[Install on NixOS or Linux/macOS via nix](#install-on-nixos-or-with-nix-community)\n\n## Run:\n```sh\nkubescape scan --submit --enable-host-scan --verbose\n```\n\n\n\n
\n\n> Kubescape is an open source project. We welcome your feedback and ideas for improvement. We\u2019re also aiming to collaborate with the Kubernetes community to help make the tests more robust and complete as Kubernetes develops.\n\n
\n\n## Architecture in short\n### CLI\n\n\n### Operator\n\n\n### Please [star \u2b50](https://github.com/kubescape/kubescape/stargazers) the repo if you want us to continue developing and improving Kubescape \ud83d\ude00\n\n
\n\n# Being a part of the team\n\n## Community\nWe invite you to our community! We are excited about this project and want to return the love we get.\n\nWe hold community meetings in [Zoom](https://us02web.zoom.us/j/84020231442) on the first Tuesday of every month at 14:00 GMT! :sunglasses:\n\n## Contributions \n[Want to contribute?](https://github.com/kubescape/kubescape/blob/master/CONTRIBUTING.md) Want to discuss something? Have an issue? Please make sure that you follow our [Code Of Conduct](https://github.com/kubescape/kubescape/blob/master/CODE_OF_CONDUCT.md) . \n\n* Feel free to pick a task from the [issues](https://github.com/kubescape/kubescape/issues?q=is%3Aissue+is%3Aopen+label%3A%22open+for+contribution%22), [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :) \n* [Open an issue](https://github.com/kubescape/kubescape/issues/new/choose) , we are trying to respond within 48 hours\n* [Join us](https://discord.com/invite/WKZRaCtBxN) in the discussion on our discord server!\n\n[\"logo\"](https://discord.com/invite/WKZRaCtBxN)\n![discord](https://img.shields.io/discord/893048809884643379)\n\n\n# Options and examples\n\n[Kubescape docs](https://hub.armosec.io/docs?utm_source=github&utm_medium=repository)\n\n## Playground\n* [Kubescape playground](https://killercoda.com/saiyampathak/scenario/kubescape)\n\n## Tutorials\n\n* [Overview](https://youtu.be/wdBkt_0Qhbg)\n* [How To Secure Kubernetes Clusters With Kubescape And Armo](https://youtu.be/ZATGiDIDBQk)\n* [Scan Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)\n* [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)\n* [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)\n* [Configure and run customized frameworks](https://youtu.be/12Sanq_rEhs)\n* Customize control configurations: \n - [Kubescape CLI](https://youtu.be/955psg6TVu4) \n - [Kubescape SaaS](https://youtu.be/lIMVSVhH33o)\n\n## Install on Windows\n\n
Windows\n\n**Requires PowerShell v5.0+**\n\n``` powershell\niwr -useb https://raw.githubusercontent.com/kubescape/kubescape/master/install.ps1 | iex\n```\n\nNote: if you get an error, you might need to change the execution policy (i.e. allow PowerShell to run scripts) with\n\n``` powershell\nSet-ExecutionPolicy RemoteSigned -scope CurrentUser\n```\n
\n\n\n## Install on macOS\n\n
MacOS\n\n1. ```sh\n brew tap kubescape/tap\n ```\n2. ```sh\n brew install kubescape-cli\n ```\n
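\nTo confirm the installation worked, you can print the CLI version as a quick sanity check (this assumes the installed release provides the `version` subcommand):\n\n```sh\nkubescape version\n```\n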
\n\n## Install on NixOS or with nix (Community)\n\n
Nix/NixOS\n\nDirect any issues installing `kubescape` via `nix` to the channels mentioned [here](https://nixos.wiki/wiki/Support).\n\nYou can use `nix` on Linux or macOS; other platforms are supported unofficially.\n\nTry it out in an ephemeral shell: `nix-shell -p kubescape`\n\nInstall it declaratively as usual.\n\nNixOS:\n\n```nix\n # your other config ...\n environment.systemPackages = with pkgs; [\n # your other packages ...\n kubescape\n ];\n```\n\nhome-manager:\n\n```nix\n # your other config ...\n home.packages = with pkgs; [\n # your other packages ...\n kubescape\n ];\n```\n\nOr install it to your profile (not preferred): `nix-env --install -A nixpkgs.kubescape`\n\n
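After adding `kubescape` to your configuration, rebuild and switch as you normally would; a minimal sketch using the standard commands (nothing Kubescape-specific here):\n\n```sh\n# NixOS\nsudo nixos-rebuild switch\n\n# home-manager\nhome-manager switch\n```\n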
\n\n## Usage & Examples\n\n### Examples\n\n\n#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)\n```\nkubescape scan --submit --enable-host-scan --verbose\n```\n\n> Read [here](https://hub.armosec.io/docs/host-sensor?utm_source=github&utm_medium=repository) more about the `enable-host-scan` flag\n\n#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)\n```\nkubescape scan framework nsa --submit\n```\n\n\n#### Scan a running Kubernetes cluster with [`MITRE ATT&CK\u00ae`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)\n```\nkubescape scan framework mitre --submit\n```\n\n\n#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armosec.io/docs/controls?utm_source=github&utm_medium=repository) \n```\nkubescape scan control \"Privileged container\"\n```\n\n#### Scan using an alternative kubeconfig file\n```\nkubescape scan --kubeconfig cluster.conf\n```\n\n#### Scan specific namespaces\n```\nkubescape scan --include-namespaces development,staging,production\n```\n\n#### Scan cluster and exclude some namespaces\n```\nkubescape scan --exclude-namespaces kube-system,kube-public\n```\n\n#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI). Submit the results in case the directory is a git repo. 
[docs](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)\n```\nkubescape scan *.yaml --submit\n```\n\n#### Scan Kubernetes manifest files from a git repository [and submit the results](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)\n```\nkubescape scan https://github.com/kubescape/kubescape --submit\n```\n\n#### Display all scanned resources (including the resources which passed) \n```\nkubescape scan --verbose\n```\n\n#### Output in `json` format\n\n> Add the `--format-version v2` flag \n\n```\nkubescape scan --format json --format-version v2 --output results.json\n```\n\n#### Output in `junit xml` format\n```\nkubescape scan --format junit --output results.xml\n```\n\n#### Output in `pdf` format - Contributed by [@alegrey91](https://github.com/alegrey91)\n\n```\nkubescape scan --format pdf --output results.pdf\n```\n\n#### Output in `prometheus` metrics format - Contributed by [@Joibel](https://github.com/Joibel)\n\n```\nkubescape scan --format prometheus\n```\n\n#### Output in `html` format\n\n```\nkubescape scan --format html --output results.html\n```\n\n#### Scan with exceptions; objects with exceptions will be reported as `exclude` instead of `fail`\n[Full documentation](examples/exceptions/README.md)\n```\nkubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json\n```\n\n#### Scan Helm charts \n```\nkubescape scan --submit\n```\n> Kubescape will load the default values file\n\n#### Scan Kustomize Directory \n```\nkubescape scan --submit\n```\n> Kubescape will generate Kubernetes YAML objects using the Kustomize file and scan them for security issues.\n\n### Offline/Air-gapped Environment Support\n\n[Video tutorial](https://youtu.be/IGXL9s37smM)\n\nIt is possible to run Kubescape offline!\n#### Download all artifacts\n\n1. Download the artifacts and save them in a local directory; if no path is specified, everything is saved in `~/.kubescape`\n```\nkubescape download artifacts --output path/to/local/dir\n```\n2. Copy the downloaded artifacts to the air-gapped/offline environment\n\n3. Scan using the downloaded artifacts\n```\nkubescape scan --use-artifacts-from path/to/local/dir\n```\n\n#### Download a single artifact\n\nYou can also download a single artifact and scan with the `--use-from` flag\n\n1. Download the artifact and save it to a file; if no file name is specified, it is saved in `~/.kubescape/.json`\n```\nkubescape download framework nsa --output /path/nsa.json\n```\n2. Copy the downloaded artifact to the air-gapped/offline environment\n\n3. Scan using the downloaded framework\n```\nkubescape scan framework nsa --use-from /path/nsa.json\n```\n\n\n## Scan Periodically using Helm \n[Please follow the instructions here](https://hub.armosec.io/docs/installation-of-armo-in-cluster?utm_source=github&utm_medium=repository)\n[helm chart repo](https://github.com/armosec/armo-helm)\n\n# Integrations\n\n## VS Code Extension \n\n![Visual Studio Marketplace Downloads](https://img.shields.io/visual-studio-marketplace/d/kubescape.kubescape?label=VScode) ![Open VSX](https://img.shields.io/open-vsx/dt/kubescape/kubescape?label=openVSX&color=yellowgreen)\n\nScan the YAML files while writing them using the [VS Code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md) \n\n## Lens Extension\n\nView Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using the Kubescape [Lens extension](https://github.com/armosec/lens-kubescape/blob/master/README.md)\n\n\n# Building Kubescape\n\n## Build on Windows\n\n
Windows\n\n1. Install MSYS2 & build libgit2 _(needed only for the first time)_\n\n ```\n build.bat all\n ```\n\n> You can install MSYS2 separately by running `build.bat install` and build libgit2 separately by running `build.bat build`\n\n2. Build kubescape\n\n ```\n make build\n ```\n\n OR \n\n ```\n go build -tags=static .\n ```\n
\n\n## Build on Linux/MacOS\n\n
Linux / MacOS\n\n1. Install libgit2 dependency _(needed only for the first time)_\n \n ```\n make libgit2\n ```\n\n> `cmake` is required to build libgit2. You can install it by running `sudo apt-get install cmake` (Linux) or `brew install cmake` (macOS)\n\n2. Build kubescape\n\n ```\n make build\n ```\n\n OR \n\n ```\n go build -tags=static .\n ```\n\n3. Test\n\n ```\n make test\n ```\n\n
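Once the build finishes, the binary should land in the repository root (an assumption based on the build commands above); a quick smoke test reusing one of the scan examples from this README:\n\n```sh\n./kubescape scan --verbose\n```\n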
\n\n## Build on pre-configured killercoda's ubuntu playground\n\n* [Pre-configured Killercoda's Ubuntu Playground](https://killercoda.com/suhas-gumma/scenario/kubescape-build-for-development)\n\n
Pre-programmed actions executed by the playground \n\n\n* Clone the official GitHub repository of `Kubescape`.\n* [Automate the build process on Linux](https://github.com/kubescape/kubescape#build-on-linuxmacos)\n* The entire process executes multiple commands in order and takes around 5-6 minutes to complete.\n\n
\n\n
\nInstructions to use the playground\n\n* Make the changes you want inside the kubescape directory using a text editor such as `Vim`.\n* [Build on Linux](https://github.com/kubescape/kubescape#build-on-linuxmacos)\n* Now you can use Kubescape just like a normal user. Instead of running `kubescape`, run `./kubescape` (make sure you are inside the kubescape directory, because the command executes the binary named `kubescape` located there); see the example below.\n\n
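For example, to run one of the scans from this README with the locally built binary:\n\n```sh\n./kubescape scan framework nsa --verbose\n```\n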
\n\n## VS code configuration samples\n\nYou can use the sample files below to setup your VS code environment for building and debugging purposes.\n\n\n
.vscode/settings.json\n\n```json5\n// .vscode/settings.json\n{\n \"go.testTags\": \"static\",\n \"go.buildTags\": \"static\",\n \"go.toolsEnvVars\": {\n \"CGO_ENABLED\": \"1\"\n }\n}\n```\n
\n\n
.vscode/launch.json\n\n```json5\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Launch Package\",\n \"type\": \"go\",\n \"request\": \"launch\",\n \"mode\": \"auto\",\n \"program\": \"${workspaceFolder}/main.go\",\n \"args\": [\n \"scan\",\n \"--logger\",\n \"debug\"\n ],\n \"buildFlags\": \"-tags=static\"\n }\n ]\n}\n```\n
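\nIf you prefer running the same thing from a terminal instead of the VS Code debugger, the equivalent invocation (mirroring the `args` and `buildFlags` above) would be roughly:\n\n```sh\ngo run -tags=static . scan --logger debug\n```\n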
\n\n# Under the hood\n\n## Technology\nKubescape is based on the [OPA engine](https://github.com/open-policy-agent/opa) and ARMO's posture controls.\n\nThe tool retrieves Kubernetes objects from the API server and runs a set of [Rego snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io?utm_source=github&utm_medium=repository).\n\nBy default, the results are printed in a pretty, \"console friendly\" manner, but they can be retrieved in JSON format for further processing.\n\nKubescape is an open source project; we welcome your feedback and ideas for improvement. We\u2019re also aiming to collaborate with the Kubernetes community to help make the tests more robust and complete as Kubernetes develops.\n\n## Thanks to all the contributors \u2764\ufe0f\n\n \n\n\n", - "source_links": [], - "id": 69 - }, - { - "page_link": null, - "title": "kubricks readme", - "text": null, - "source_links": [], - "id": 70 - }, - { - "page_link": null, - "title": "kyverno readme", - "text": null, - "source_links": [], - "id": 71 - }, - { - "page_link": null, - "title": "lakefs readme", - "text": null, - "source_links": [], - "id": 72 - }, - { - "page_link": "https://github.com/lightdash/lightdash", - "title": "lightdash readme", - "text": "

\n \n

\n\n

The open-source Looker alternative.

\n\n
\n \n
\n
\n

\n Website \u2022\n Watch demo \u2022\n Docs \u2022\n Join Slack Community\n

\n
\n\n
\n
\n\n\n
\n
\n\n\n\n\n\n
\n\n
\n\n### Enable everybody in your company to answer their own questions using data\n\nconnect your dbt project --> add metrics into dbt --> share insights with your team\n\nIf you're a fan, star the repo \u2b50\ufe0f (we [plant a tree](#the-lightdash-forest) for every GitHub star we get \ud83c\udf31).\n\nCome join the team, [we're hiring](https://lightdash.notion.site/Lightdash-Job-Board-a2c7d872794b45deb7b76ad68701d750).\n\n
\n\n## Features:\n\n- [x] \ud83d\ude4f Familiar interface for your users to self-serve using pre-defined metrics\n- [x] \ud83d\udc69\u200d\ud83d\udcbb Declare dimensions and metrics in yaml alongside your dbt project\n- [x] \ud83e\udd16 Automatically creates dimensions from your dbt models\n- [x] \ud83d\udcd6 All dbt descriptions synced for your users\n- [x] \ud83e\uddee Table calculations make it easy to dig into your data, on the fly\n- [x] \ud83d\udd75\ufe0f\u200d\u2640\ufe0f Lineage lets you see the upstream and downstream dependencies of a model\n- [x] \ud83d\udcca Simple data visualisations for your metrics\n- [x] \ud83d\udc77\u200d\u2642\ufe0f Save charts & build dashboards to share your insights with your team\n- [x] \ud83d\ude80 Share your work as a URL or export results to use in any other tool\n\nSomething missing? Check out our [open issues](https://github.com/lightdash/lightdash/issues)\nto see if what you're looking for already exists (and give it a \ud83d\udc4d). Otherwise, we'd love it if\nyou'd [open a new issue with your request](https://github.com/lightdash/lightdash/issues/new/choose) \ud83d\ude0a\n\n## Demo\n\nPlay with our [demo app](https://demo.lightdash.com)!\n\n## Quick start\n\n### 1-click deploy\n\nDeploy Lightdash with 1-click (free options available\n\n
\n\n\n \"Deploy\n\n\n \"Deploy\n\n
\n\nDeploy your own hosted Lightdash instance with Heroku (free account available). Check\nthe [documentation page](https://docs.lightdash.com/get-started/setup-lightdash/install-lightdash#deploy-to-heroku) for\nmore details.\n\n### Run locally\n\nTake advantage of our installation script to easily run Lightdash locally. Check\nthe [documentation page](https://docs.lightdash.com/get-started/setup-lightdash/install-lightdash#deploy-locally-with-our-installation-script)\nfor more details.\n\n```bash\ngit clone https://github.com/lightdash/lightdash\ncd lightdash\n./scripts/install.sh\n```\n\n### Deploy to production\n\nFollow our [kubernetes guide](https://docs.lightdash.com/guides/how-to-deploy-to-kubernetes) to deploy Lightdash to\nproduction using our [community helm charts](https://github.com/lightdash/helm-charts).\n\n### Sign up to Lightdash Cloud\n\nYou can avoid the hassle of hosting and configuring Lightdash yourself by\u00a0[signing up for Lightdash Cloud Public Beta](https://lightdash.typeform.com/public-beta#source=github) . We'll let you know once we're ready to bring you on board \ud83d\ude42\n\n## Getting started\n\nStep 1 - \u26a1\ufe0f [Install Lightdash](https://docs.lightdash.com/get-started/setup-lightdash/install-lightdash)\n\nStep 2 - \ud83d\udd0c [Connect a project](https://docs.lightdash.com/get-started/setup-lightdash/connect-project)\n\nStep 3 - \ud83d\udc69\u200d\ud83d\udcbb [Create your first metric](https://docs.lightdash.com/get-started/setup-lightdash/intro-metrics-dimensions)\n\n## Community Support\n\n\ud83d\udce3 If you want something a bit more, then [head on over to our Slack Community](https://join.slack.com/t/lightdash-community/shared_invite/zt-1bfmfnyfq-nSeTVj0cT7i2ekAHYbBVdQ) where you\u2019ll be able to chat directly with all of us at Lightdash and all the other amazing members of our community. We\u2019re happy to talk about anything from feature requests, implementation details or dbt quirks to memes and SQL jokes!\n\nYou can also keep up to date with Lightdash by following us elsewhere:\n\n- [Twitter](https://twitter.com/lightdash_devs)\n- [LinkedIn](https://www.linkedin.com/company/lightdash)\n\n## About Lightdash\n\n### \ud83d\uddc2 **Keep all of your business logic in one place.**\n\nWe let you define your metrics and dimensions directly in your dbt project, keeping all of your business logic in one place and increasing the context around your analytics.\n\nNo more deciding which of the four different values for total revenue is the **_right_** one (you can thank us later \ud83d\ude09).\n\n### \ud83e\udd1d **Build trust in your data.**\n\nWe want everyone at your company to feel like they can trust the data. So, why not **_show_** them that they can?\n\nWe bring the context you want around data quality _into_ your BI tool so people know that they can trust the data.\n\n### \ud83e\uddf1 **Give users meaningful building blocks to answer their own data questions.**\n\nWith Lightdash, you can leave the SQL to the experts. 
We give your data team the tools they need to build metrics and dimensions that everyone else can use.\n\nSo, anybody in the business can combine, segment, and filter these metrics and dimensions to answer their own questions.\n\n### \ud83d\udcd6 **Open source, now and forever**\n\n**Lightdash is built with our community, for our community.**\n\nWe think that a BI tool should be affordable, configurable, and secure - and being open source lets us be all three \ud83d\ude42\n\n### \ud83e\udd11 **Affordable analytics**\n\nLove Looker, but don't love Looker's price tag?\n\nWith Lightdash, we offer a free self-hosted service (it's all just open source!), or an affordable cloud-service option if you're looking for an easy analytics set up.\n\n## Docs\n\nHave a question about a feature? Or maybe fancy some light reading? Head on over to\nour [Lightdash documentation](https://docs.lightdash.com/) to check out some tutorials, reference docs, FAQs and more.\n\n## Reporting bugs and feature requests\n\nWant to report a bug or request a feature? Open an [issue](https://github.com/lightdash/lightdash/issues/new/choose).\n\n## The Lightdash Forest\n\nWe're planting trees with the help of the Lightdash community.\n\nTree planting is one of the simplest and most cost-effective means of mitigating climate change, by absorbing CO2 from the atmosphere. So we thought it would be pretty neat to grow a forest while we grow Lightdash.\n\nWant to help us grow our forest?\n\nJust star this repo! We plant a tree for every star we get on Github. \u2b50\ufe0f \u27a1\ufe0f \ud83c\udf31\n\nWe plant trees with TIST, you can read all about them here: https://program.tist.org/.\n\n## Developing locally & Contributing\n\nWe love contributions big or small, check out [our guide](https://github.com/lightdash/lightdash/blob/main/.github/CONTRIBUTING.md#contributing-to-lightdash) on how to get started.\n\nSee our [instructions](https://github.com/lightdash/lightdash/blob/main/.github/CONTRIBUTING.md#setup-development-environment) on developing Lightdash locally.\n\n## Contributors \u2728\n\nThanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):\n\n\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
\"Rahul
Rahul Jain

\ud83d\udcd6
\"Oliver
Oliver Laslett

\ud83d\udcbb \ud83d\udcd6 \ud83d\udc1b \ud83c\udfa8 \ud83d\ude87
\"Katie
Katie Hindson

\ud83d\udc1b \ud83d\udcd6 \ud83c\udfa8 \ud83d\udcbb \ud83e\udd14
\"Hamzah
Hamzah Chaudhary

\ud83d\udcd6 \ud83d\udcbb \ud83e\udd14 \ud83d\udc1b
\"Harry
Harry Grieve

\ud83d\udcd6
\"Dominik
Dominik Dorfmeister

\ud83c\udfa8
\"amin-nejad\"/
amin-nejad

\ud83d\udc1b
\"Mitja
Mitja Poto\u010din

\ud83d\udcbb
\"Jose
Jose Rego

\ud83d\udcbb \ud83c\udfa8 \ud83d\udcd6 \ud83d\udc1b \u26a0\ufe0f \ud83d\ude87
\"Rahul\"/
Rahul

\ud83d\udc1b \ud83c\udfa8 \ud83d\udcbb \ud83d\udcd6
\"Jeshua
Jeshua Maxey

\ud83d\udc1b
\"Sreejith
Sreejith Madhavan

\ud83d\udc1b
\"skame\"/
skame

\ud83d\udc1b \ud83c\udfa8
\"sgoley\"/
sgoley

\ud83d\udcd6
\"djayatillake\"/
djayatillake

\ud83c\udfa8 \ud83d\udcbb \ud83e\udd14
\"Mukesh\"/
Mukesh

\ud83d\udd23 \ud83d\udc1b
\"Andreia
Andreia Freitas

\u26a0\ufe0f \ud83d\udcd6
\"jb\"/
jb

\ud83d\udcbb \ud83d\udc1b \ud83c\udfa8
\"Amy
Amy Chen

\ud83d\udcd6
\"John
John Keech

\ud83d\ude87
\"Dr.
Dr. Ernie Prabhakar

\ud83d\udc1b \ud83e\udd14
\"PriPatel\"/
PriPatel

\ud83c\udfa8 \ud83d\udc1b \ud83e\udd14
\"NaomiJohnson\"/
NaomiJohnson

\ud83c\udfa8 \ud83d\udc1b
\"Rich
Rich Shen

\ud83d\udcbb \u26a0\ufe0f \ud83d\udc1b
\"David
David Gasquez

\ud83e\udd14 \ud83c\udfa8
\"xjaner\"/
xjaner

\ud83e\udd14
\"Chris
Chris Bol

\ud83e\udd14
\"Anil
Anil V

\ud83e\udd14
\"rlebrao\"/
rlebrao

\ud83e\udd14 \ud83d\udc1b
\"philcarr-tsl\"/
philcarr-tsl

\ud83d\udc1b \ud83d\udd23
\"HashimsGitHub\"/
HashimsGitHub

\ud83d\ude87
\"Nathalia
Nathalia Buitrago Jurado

\ud83d\udcd6 \ud83d\udcbb \ud83d\udc1b \ud83c\udfa8
\"norbag\"/
norbag

\ud83d\udc1b
\"Shrpp\"/
Shrpp

\ud83d\udc1b
\"Cuong
Cuong Vu

\ud83d\udc1b
\"Takaaki
Takaaki Yoshikawa

\ud83e\udd14
\"nkotlyarov\"/
nkotlyarov

\ud83d\udc1b
\"kim
kim monzon

\ud83e\udd14
\"rverheijen\"/
rverheijen

\u26a0\ufe0f \ud83d\udc1b
\"Spencer
Spencer Carrucciu

\ud83e\udd14
\"Mark
Mark Olliver

\ud83d\udc1b
\"gary-beautypie\"/
gary-beautypie

\ud83d\udc1b
\"Andr\u00e9
Andr\u00e9 Claudino

\ud83d\udcbb \ud83d\ude87
\"Jim
Jim Park

\ud83d\ude87
\"gc-p\"/
gc-p

\ud83d\udc1b
\"Micha\u0142
Micha\u0142 \u0141azowik

\ud83d\udcbb
\"Chun
Chun Wei

\ud83e\udd14
\"snyh-paulhenderson\"/
snyh-paulhenderson

\ud83e\udd14
\"Frank
Frank Hoffs\u00fcmmer

\ud83d\udc1b
\"Sarah
Sarah Moens

\ud83d\udcd6
\"Abhishek
Abhishek K M

\ud83d\udcbb
\"Javier
Javier Rengel Jim\u00e9nez

\ud83d\udcbb \ud83d\udc1b \u26a0\ufe0f \ud83d\ude87 \ud83d\udcd6
\"Fisa\"/
Fisa

\ud83d\udc1b
\"JoelAlander\"/
JoelAlander

\ud83d\udc1b
\"Chad
Chad Floyd

\ud83e\udd14
\"Andr\u00e9
Andr\u00e9 Claudino

\ud83e\udd14
\"12ian34\"/
12ian34

\ud83d\udcd6 \ud83d\udc1b \ud83d\udcbb
\"raphaelauv\"/
raphaelauv

\ud83d\udc1b \ud83d\udcd6
\"BA-CY\"/
BA-CY

\ud83e\udd14
\"John
John Romanski

\ud83d\udc1b
\"Jamie
Jamie Davenport

\ud83d\udc1b
\"Marcus
Marcus Windmark

\ud83e\udd14
\"Shruti
Shruti Kuber

\ud83d\udcd6
\"Fszta\"/
Fszta

\ud83d\udcd6
\"Mohamed
Mohamed Muhsin

\ud83d\udcbb
\"magants\"/
magants

\ud83e\udd14
\"Martin
Martin Carlsson

\ud83e\udd14 \ud83d\udc1b
\"Tomas
Tomas \u010cerkasas

\ud83e\udd14
\"TiFaBl\"/
TiFaBl

\ud83e\udd14
\"Eric
Eric Cecchi

\ud83d\udcbb
\"KristyMayer\"/
KristyMayer

\ud83d\udc1b
\"rahulstomar08\"/
rahulstomar08

\ud83e\udd14
\"Charles
Charles Picowski

\ud83d\udc1b
\"Matt
Matt Machczynski

\ud83d\udc1b
\"Irakli
Irakli Janiashvili

\ud83d\udcbb \ud83d\udc1b \ud83c\udfa8 \u26a0\ufe0f
\"Gordon
Gordon Lee

\ud83e\udd14 \ud83d\udcd6
\"Olly\"/
Olly

\ud83e\udd14 \ud83d\udc1b
\"gautamdoulani\"/
gautamdoulani

\ud83d\udcd6
\"David
David Peitinho

\ud83d\udc1b
\"Istvan
Istvan Meszaros

\ud83e\udd14
\"Rif\"/
Rif

\ud83d\udcd6
\"Phillip
Phillip W.

\ud83e\udd14
\"XiaozhouWang85\"/
XiaozhouWang85

\ud83e\udd14
\"Rebecca
Rebecca Sanjabi

\ud83d\udc1b
\"Kailin
Kailin L

\ud83e\udd14
\"Metin
Metin Karakus

\ud83d\udcbb
\"Yasmine\"/
Yasmine

\ud83d\udcbb
\"Piotr
Piotr Pilis

\ud83d\udcbb
\"Judah
Judah Rand

\ud83d\udc1b
\"Annebelle
Annebelle Olminkhof

\ud83d\udcd6
\"Victor
Victor Apolonio

\ud83d\udcbb
\"Rodolfo
Rodolfo Ferreira

\ud83d\udcbb
\"Patrick
Patrick Brusven

\ud83d\udcbb
\n\n\n\n\n\n\nThis project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification.\nContributions of any kind welcome!\n\n+\n", - "source_links": [], - "id": 73 - }, - { - "page_link": "https://github.com/grafana/loki", - "title": "loki readme", - "text": "

\"Loki

\n\n\"Drone\n\"Go\n\"Slack\"\n[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/loki.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:loki)\n\n# Loki: like Prometheus, but for logs.\n\nLoki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by [Prometheus](https://prometheus.io/).\nIt is designed to be very cost effective and easy to operate.\nIt does not index the contents of the logs, but rather a set of labels for each log stream.\n\nCompared to other log aggregation systems, Loki:\n\n- does not do full text indexing on logs. By storing compressed, unstructured logs and only indexing metadata, Loki is simpler to operate and cheaper to run.\n- indexes and groups log streams using the same labels you\u2019re already using with Prometheus, enabling you to seamlessly switch between metrics and logs using the same labels that you\u2019re already using with Prometheus.\n- is an especially good fit for storing [Kubernetes](https://kubernetes.io/) Pod logs. Metadata such as Pod labels is automatically scraped and indexed.\n- has native support in Grafana (needs Grafana v6.0).\n\nA Loki-based logging stack consists of 3 components:\n\n- `promtail` is the agent, responsible for gathering logs and sending them to Loki.\n- `loki` is the main server, responsible for storing logs and processing queries.\n- [Grafana](https://github.com/grafana/grafana) for querying and displaying the logs.\n\nLoki is like Prometheus, but for logs: we prefer a multidimensional label-based approach to indexing, and want a single-binary, easy to operate system with no dependencies.\nLoki differs from Prometheus by focusing on logs instead of metrics, and delivering logs via push, instead of pull.\n\n## Getting started\n\n* [Installing Loki](https://grafana.com/docs/loki/latest/installation/)\n* [Installing Promtail](https://grafana.com/docs/loki/latest/clients/promtail/installation/)\n* [Getting Started](https://grafana.com/docs/loki/latest/getting-started/)\n\n## Upgrading\n\n* [Upgrading Loki](https://grafana.com/docs/loki/latest/upgrading/)\n\n## Documentation\n\n* [Latest release](https://grafana.com/docs/loki/latest/)\n* [Upcoming release](https://grafana.com/docs/loki/next/), at the tip of the main branch\n\nCommonly used sections:\n\n- [API documentation](https://grafana.com/docs/loki/latest/api/) for getting logs into Loki.\n- [Labels](https://grafana.com/docs/loki/latest/getting-started/labels/)\n- [Operations](https://grafana.com/docs/loki/latest/operations/)\n- [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/) is an agent which tails log files and pushes them to Loki.\n- [Pipelines](https://grafana.com/docs/loki/latest/clients/promtail/pipelines/) details the log processing pipeline.\n- [Docker Driver Client](https://grafana.com/docs/loki/latest/clients/docker-driver/) is a Docker plugin to send logs directly to Loki from Docker containers.\n- [LogCLI](https://grafana.com/docs/loki/latest/getting-started/logcli/) provides a command-line interface for querying logs.\n- [Loki Canary](https://grafana.com/docs/loki/latest/operations/loki-canary/) monitors your Loki installation for missing logs.\n- [Troubleshooting](https://grafana.com/docs/loki/latest/getting-started/troubleshooting/) presents help dealing with error messages.\n- [Loki in Grafana](https://grafana.com/docs/loki/latest/getting-started/grafana/) describes how to set up a Loki datasource in Grafana.\n\n## Getting Help\n\nIf you have any 
questions or feedback regarding Loki:\n\n- Search existing thread in the Grafana Labs community forum for Loki: [https://community.grafana.com](https://community.grafana.com/c/grafana-loki/)\n- Ask a question on the Loki Slack channel. To invite yourself to the Grafana Slack, visit [https://slack.grafana.com/](https://slack.grafana.com/) and join the #loki channel.\n- [File an issue](https://github.com/grafana/loki/issues/new) for bugs, issues and feature suggestions.\n- Send an email to [lokiproject@googlegroups.com](mailto:lokiproject@googlegroups.com), or use the [web interface](https://groups.google.com/forum/#!forum/lokiproject).\n- UI issues should be filed directly in [Grafana](https://github.com/grafana/grafana/issues/new).\n\nYour feedback is always welcome.\n\n## Further Reading\n\n- The original [design doc](https://docs.google.com/document/d/11tjK_lvp1-SVsFZjgOTr1vV3-q6vBAsZYIQ5ZeYBkyM/view) for Loki is a good source for discussion of the motivation and design decisions.\n- Callum Styan's March 2019 DevOpsDays Vancouver talk \"[Grafana Loki: Log Aggregation for Incident Investigations][devopsdays19-talk]\".\n- Grafana Labs blog post \"[How We Designed Loki to Work Easily Both as Microservices and as Monoliths][architecture-blog]\".\n- Tom Wilkie's early-2019 CNCF Paris/FOSDEM talk \"[Grafana Loki: like Prometheus, but for logs][fosdem19-talk]\" ([slides][fosdem19-slides], [video][fosdem19-video]).\n- David Kaltschmidt's KubeCon 2018 talk \"[On the OSS Path to Full Observability with Grafana][kccna18-event]\" ([slides][kccna18-slides], [video][kccna18-video]) on how Loki fits into a cloud-native environment.\n- Goutham Veeramachaneni's blog post \"[Loki: Prometheus-inspired, open source logging for cloud natives](https://grafana.com/blog/2018/12/12/loki-prometheus-inspired-open-source-logging-for-cloud-natives/)\" on details of the Loki architecture.\n- David Kaltschmidt's blog post \"[Closer look at Grafana's user interface for Loki](https://grafana.com/blog/2019/01/02/closer-look-at-grafanas-user-interface-for-loki/)\" on the ideas that went into the logging user interface.\n\n[devopsdays19-talk]: https://grafana.com/blog/2019/05/06/how-loki-correlates-metrics-and-logs-and-saves-you-money/\n[architecture-blog]: https://grafana.com/blog/2019/04/15/how-we-designed-loki-to-work-easily-both-as-microservices-and-as-monoliths/\n[fosdem19-talk]: https://fosdem.org/2019/schedule/event/loki_prometheus_for_logs/\n[fosdem19-slides]: https://speakerdeck.com/grafana/grafana-loki-like-prometheus-but-for-logs\n[fosdem19-video]: https://mirror.as35701.net/video.fosdem.org/2019/UB2.252A/loki_prometheus_for_logs.mp4\n[kccna18-event]: https://kccna18.sched.com/event/GrXC/on-the-oss-path-to-full-observability-with-grafana-david-kaltschmidt-grafana-labs\n[kccna18-slides]: https://speakerdeck.com/davkal/on-the-path-to-full-observability-with-oss-and-launch-of-loki\n[kccna18-video]: https://www.youtube.com/watch?v=U7C5SpRtK74&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU&index=346\n\n## Contributing\n\nRefer to [CONTRIBUTING.md](CONTRIBUTING.md)\n\n### Building from source\n\nLoki can be run in a single host, no-dependencies mode using the following commands.\n\nYou need `go`, we recommend using the version found in [our build Dockerfile](https://github.com/grafana/loki/blob/main/loki-build-image/Dockerfile)\n\n```bash\n\n$ go get github.com/grafana/loki\n$ cd $GOPATH/src/github.com/grafana/loki # GOPATH is $HOME/go by default.\n\n$ go build ./cmd/loki\n$ ./loki 
-config.file=./cmd/loki/loki-local-config.yaml\n...\n```\n\nTo build Promtail on non-Linux platforms, use the following command:\n\n```bash\n$ go build ./clients/cmd/promtail\n```\n\nOn Linux, Promtail requires the systemd headers to be installed if\nJournal support is enabled.\nTo enable Journal support the go build tag flag `promtail_journal_enabled` should be passed\n\nWith Journal support on Ubuntu, run with the following commands:\n\n```bash\n$ sudo apt install -y libsystemd-dev\n$ go build --tags=promtail_journal_enabled ./clients/cmd/promtail\n```\n\nWith Journal support on CentOS, run with the following commands:\n\n```bash\n$ sudo yum install -y systemd-devel\n$ go build --tags=promtail_journal_enabled ./clients/cmd/promtail\n```\n\nOtherwise, to build Promtail without Journal support, run `go build`\nwith CGO disabled:\n\n```bash\n$ CGO_ENABLED=0 go build ./clients/cmd/promtail\n```\n## Adopters\nPlease see [ADOPTERS.md](ADOPTERS.md) for some of the organizations using Loki today.\nIf you would like to add your organization to the list, please open a PR to add it to the list.\n\n## License\n\nGrafana Loki is distributed under [AGPL-3.0-only](LICENSE). For Apache-2.0 exceptions, see [LICENSING.md](LICENSING.md).\n", - "source_links": [], - "id": 74 - }, - { - "page_link": "external-access.md", - "title": "external-access", - "text": "# Ship Logs to Loki from beyond this cluster\n\nLoki by default is deployed in a cluster local way. The simplest way to enable external ingress is to set your install to use basic auth, which can be done via editing your `context.yaml` file with:\n\n\n```yaml\nconfiguration:\n loki:\n hostname: loki. # you can find the configured domain in `workspace.yaml`\n basicAuth:\n user: \n password: \n```\nyou can use `plural crypto random` to generate a high-entropy password if that is helpful as well.\n\n\nOnce that file has been edited, you can run `plural build --only loki && plural deploy --commit \"configure loki ingress\"` to update your loki install.", - "source_links": [], - "id": 75 - }, - { - "page_link": "https://github.com/mage-ai/mage-ai", - "title": "mage readme", - "text": "

\n \n \n \n

\n

\n \ud83e\uddd9 A modern replacement for Airflow.\n

\n\n

\n Documentation   \ud83c\udf2a\ufe0f   \n Watch 2 min demo   \ud83c\udf0a   \n Play with live tool   \ud83d\udd25   \n \n Get instant help\n \n

\n
\n \n \"PyPi\"\n \n \n \n \n \n \"License\"\n \n \n \"Slack\"\n \n \n \"Github\n \n \n \"Docker\n \n \n \"pip\n \n
\n\n\n
\n\n### Give your data team `magical` powers\n\n
\n\n

\n Integrate and synchronize data from 3rd party sources\n

\n\n

\n Build real-time and batch pipelines to transform data using Python, SQL, and R\n

\n\n

\n Run, monitor, and orchestrate thousands of pipelines without losing sleep\n

\n\n
\n\n

### 1\ufe0f\u20e3 \ud83c\udfd7\ufe0f Build\n\nHave you met anyone who said they loved developing in Airflow? That\u2019s why we designed an easy developer experience that you\u2019ll enjoy.\n\n- **Easy developer experience:** Start developing locally with a single command or launch a dev environment in your cloud using Terraform.\n- **Language of choice:** Write code in Python, SQL, or R in the same data pipeline for ultimate flexibility.\n- **Engineering best practices built-in:** Each step in your pipeline is a standalone file containing modular code that\u2019s reusable and testable with data validations. No more DAGs with spaghetti code.\n\n
### 2\ufe0f\u20e3 \ud83d\udd2e Preview\n\nStop wasting time waiting around for your DAGs to finish testing. Get instant feedback from your code each time you run it.\n\n- **Interactive code:** Immediately see results from your code\u2019s output with an interactive notebook UI.\n- **Data is a first-class citizen:** Each block of code in your pipeline produces data that can be versioned, partitioned, and cataloged for future use.\n- **Collaborate on cloud:** Develop collaboratively on cloud resources, version control with Git, and test pipelines without waiting for an available shared staging environment.\n\n
### 3\ufe0f\u20e3 \ud83d\ude80 Launch\n\nDon\u2019t have a large team dedicated to Airflow? Mage makes it easy for a single developer or small team to scale up and manage thousands of pipelines.\n\n- **Fast deploy:** Deploy Mage to AWS, GCP, or Azure with only 2 commands using maintained Terraform templates.\n- **Scaling made simple:** Transform very large datasets directly in your data warehouse or through a native integration with Spark.\n- **Observability:** Operationalize your pipelines with built-in monitoring, alerting, and observability through an intuitive UI.\n\n
\n\n# \ud83e\uddd9 Intro\n\nMage is an open-source data pipeline tool for transforming and integrating data.\n\n1. [Quick start](#%EF%B8%8F-quick-start)\n1. [Demo](#-demo)\n1. [Tutorials](#-tutorials)\n1. [Documentation](https://docs.mage.ai)\n1. [Features](#-features)\n1. [Core design principles](https://docs.mage.ai/design/core-design-principles)\n1. [Core abstractions](https://docs.mage.ai/design/core-abstractions)\n1. [Contributing](https://docs.mage.ai/community/contributing)\n\n
\n\n# \ud83c\udfc3\u200d\u2640\ufe0f Quick start\n\nYou can install and run Mage using Docker (recommended), `pip`, or `conda`.\n\n### Install using Docker\n\n1. Create a new project and launch tool (change `demo_project` to any other name if you want):\n ```bash\n docker run -it -p 6789:6789 -v $(pwd):/home/src mageai/mageai \\\n /app/run_app.sh mage start demo_project\n ```\n\n - If you want to run Mage locally on a different port, change the first port after `-p`\n in the command above. For example, to change the port to `6790`, run:\n\n ```bash\n docker run -it -p 6790:6789 -v $(pwd):/home/src mageai/mageai \\\n /app/run_app.sh mage start demo_project\n ```\n\n Want to use Spark or other integrations? Read more about [integrations](https://docs.mage.ai/data-integrations/overview).\n\n1. Open [http://localhost:6789](http://localhost:6789) in your browser and build a pipeline.\n\n - If you changed the Docker port for running Mage locally, go to the url\n `http://127.0.0.1:[port]` (e.g. http://127.0.0.1:6790) in your browser to\n view the pipelines dashboard.\n\n\n### Using `pip` or `conda`\n\n1. Install Mage\n\n #### (a) To the current virtual environment:\n ```bash\n pip install mage-ai\n ```\n or\n ```bash\n conda install -c conda-forge mage-ai\n ```\n\n #### (b) To a new virtual environment (e.g., `myenv`):\n ```bash\n python3 -m venv myenv\n source myenv/bin/activate\n pip install mage-ai\n ```\n or\n ```bash\n conda create -n myenv -c conda-forge mage-ai\n conda activate myenv\n ```\n\n For additional packages (e.g. `spark`, `postgres`, etc), please see [Installing extra packages](https://docs.mage.ai/getting-started/setup#installing-extra-packages).\n\n If you run into errors, please see [Install errors](https://docs.mage.ai/getting-started/setup#errors).\n\n1. Create new project and launch tool (change `demo_project` to any other name if you want):\n ```bash\n mage start demo_project\n ```\n1. Open [http://localhost:6789](http://localhost:6789) in your browser and build a pipeline.\n\n
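If you come back to the project later, reactivate the virtual environment before starting Mage again; a minimal sketch assuming the `myenv` environment created above:\n\n```bash\nsource myenv/bin/activate\nmage start demo_project\n```\n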
\n\n# \ud83c\udfae Demo\n\n### Live demo\n\nBuild and run a data pipeline with our [demo app](https://demo.mage.ai/).\n\n> WARNING\n>\n> The live demo is public to everyone, please don\u2019t save anything sensitive (e.g. passwords, secrets, etc).\n### Demo video (2 min)\n\n[![Mage quick start demo](media/mage-youtube-preview.jpg)](https://www.youtube.com/watch?v=hrsErfPDits \"Mage quick start demo\")\n\nClick the image to play video\n\n
\n\n# \ud83d\udc69\u200d\ud83c\udfeb Tutorials\n\n- [Load data from API, transform it, and export it to PostgreSQL](https://docs.mage.ai/tutorials/load-api-data)\n- [Integrate Mage into an existing Airflow project](https://docs.mage.ai/integrations/airflow)\n- [Train model on Titanic dataset](https://docs.mage.ai/tutorials/train-model)\n- [Set up DBT models and orchestrate DBT runs](https://docs.mage.ai/integrations/dbt-models)\n\n\"Fire\n\n
\n\n# \ud83d\udd2e [Features](https://docs.mage.ai/about/features)\n\n| | | |\n| --- | --- | --- |\n| \ud83c\udfb6 | [Orchestration](https://docs.mage.ai/design/data-pipeline-management) | Schedule and manage data pipelines with observability. |\n| \ud83d\udcd3 | [Notebook](https://docs.mage.ai/about/features#notebook-for-building-data-pipelines) | Interactive Python, SQL, & R editor for coding data pipelines. |\n| \ud83c\udfd7\ufe0f | [Data integrations](https://docs.mage.ai/data-integrations/overview) | Synchronize data from 3rd party sources to your internal destinations. |\n| \ud83d\udeb0 | [Streaming pipelines](https://docs.mage.ai/guides/streaming-pipeline) | Ingest and transform real-time data. |\n| \u274e | [DBT](https://docs.mage.ai/dbt/overview) | Build, run, and manage your DBT models with Mage. |\n\nA sample data pipeline defined across 3 files \u279d\n\n1. Load data \u279d\n ```python\n @data_loader\n def load_csv_from_file():\n return pd.read_csv('default_repo/titanic.csv')\n ```\n1. Transform data \u279d\n ```python\n @transformer\n def select_columns_from_df(df, *args):\n return df[['Age', 'Fare', 'Survived']]\n ```\n1. Export data \u279d\n ```python\n @data_exporter\n def export_titanic_data_to_disk(df) -> None:\n df.to_csv('default_repo/titanic_transformed.csv')\n ```\n\nWhat the data pipeline looks like in the UI \u279d\n\n\n\nNew? We recommend reading about [blocks](https://docs.mage.ai/design/blocks) and\nlearning from a [hands-on tutorial](https://docs.mage.ai/tutorials/load-api-data).\n\n[![Ask us questions on Slack](https://img.shields.io/badge/%20-Ask%20us%20questions%20on%20Slack-purple?style=for-the-badge&logo=slack&labelColor=6B50D7)](https://www.mage.ai/chat)\n\n
\n\n# \ud83c\udfd4\ufe0f [Core design principles](https://docs.mage.ai/design/core-design-principles)\n\nEvery user experience and technical design decision adheres to these principles.\n\n| | | |\n| --- | --- | --- |\n| \ud83d\udcbb | [Easy developer experience](https://docs.mage.ai/design/core-design-principles#easy-developer-experience) | Open-source engine that comes with a custom notebook UI for building data pipelines. |\n| \ud83d\udea2 | [Engineering best practices built-in](https://docs.mage.ai/design/core-design-principles#engineering-best-practices-built-in) | Build and deploy data pipelines using modular code. No more writing throwaway code or trying to turn notebooks into scripts. |\n| \ud83d\udcb3 | [Data is a first-class citizen](https://docs.mage.ai/design/core-design-principles#data-is-a-first-class-citizen) | Designed from the ground up specifically for running data-intensive workflows. |\n| \ud83e\ude90 | [Scaling is made simple](https://docs.mage.ai/design/core-design-principles#scaling-is-made-simple) | Analyze and process large data quickly for rapid iteration. |\n\n
\n\n# \ud83d\udef8 [Core abstractions](https://docs.mage.ai/design/core-abstractions)\n\nThese are the fundamental concepts that Mage uses to operate.\n\n| | |\n| --- | --- |\n| [Project](https://docs.mage.ai/design/core-abstractions#project) | Like a repository on GitHub; this is where you write all your code. |\n| [Pipeline](https://docs.mage.ai/design/core-abstractions#pipeline) | Contains references to all the blocks of code you want to run, charts for visualizing data, and organizes the dependency between each block of code. |\n| [Block](https://docs.mage.ai/design/core-abstractions#block) | A file with code that can be executed independently or within a pipeline. |\n| [Data product](https://docs.mage.ai/design/core-abstractions#data-product) | Every block produces data after it's been executed. These are called data products in Mage. |\n| [Trigger](https://docs.mage.ai/design/core-abstractions#trigger) | A set of instructions that determine when or how a pipeline should run. |\n| [Run](https://docs.mage.ai/design/core-abstractions#run) | Stores information about when it was started, its status, when it was completed, any runtime variables used in the execution of the pipeline or block, etc. |\n\n
\n\n# \ud83d\ude4b\u200d\u2640\ufe0f Contributing and developing\n\nAdd features and instantly improve the experience for everyone.\n\nCheck out the [contributing guide](https://docs.mage.ai/community/contributing)\nto setup your development environment and start building.\n\n
\n\n# \ud83d\udc68\u200d\ud83d\udc69\u200d\ud83d\udc67\u200d\ud83d\udc66 Community\nIndividually, we\u2019re a mage.\n\n> \ud83e\uddd9 Mage\n>\n> Magic is indistinguishable from advanced technology.\n> A mage is someone who uses magic (aka advanced technology).\nTogether, we\u2019re Magers!\n\n> \ud83e\uddd9\u200d\u2642\ufe0f\ud83e\uddd9 Magers (`/\u02c8m\u0101j\u0259r/`)\n>\n> A group of mages who help each other realize their full potential!\nLet\u2019s hang out and chat together \u279d\n\n[![Hang out on Slack](https://img.shields.io/badge/%20-Hang%20out%20on%20Slack-purple?style=for-the-badge&logo=slack&labelColor=6B50D7)](https://www.mage.ai/chat)\n\nFor real-time news, fun memes, data engineering topics, and more, join us on \u279d\n\n| | |\n| --- | --- |\n| \"Twitter\" | [Twitter](https://twitter.com/mage_ai) |\n| \"LinkedIn\" | [LinkedIn](https://www.linkedin.com/company/magetech/mycompany) |\n| \"GitHub\" | [GitHub](https://github.com/mage-ai/mage-ai) |\n| \"Slack\" | [Slack](https://www.mage.ai/chat) |\n\n
\n\n# \ud83e\udd14 Frequently Asked Questions (FAQs)\n\nCheck out our [FAQ page](https://www.notion.so/mageai/Mage-FAQs-33d93ee65f934ed39568f8a4bc823b39) to find answers to some of our most asked questions.\n\n
\n\n# \ud83e\udeaa License\nSee the [LICENSE](LICENSE) file for licensing information.\n\n[\"Water](https://www.mage.ai/)\n\n
\n", - "source_links": [], - "id": 76 - }, - { - "page_link": "https://github.com/meilisearch/meilisearch", - "title": "meilisearch readme", - "text": "

\n \n \n

\n\n

\n Website |\n Roadmap |\n Meilisearch Cloud |\n Blog |\n Documentation |\n FAQ |\n Discord\n

\n\n

\n \"Dependency\n \"License\"\n \"Bors\n

\n\n

\u26a1 A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow \ud83d\udd0d

\n\nMeilisearch helps you shape a delightful search experience in a snap, offering features that work out-of-the-box to speed up your workflow.\n\n

\n \n \"A\n \n \n \"A\n \n

\n\n\ud83d\udd25 [**Try it!**](https://where2watch.meilisearch.com/) \ud83d\udd25\n\n## \u2728 Features\n\n- **Search-as-you-type:** find search results in less than 50 milliseconds\n- **[Typo tolerance](https://www.meilisearch.com/docs/learn/getting_started/customizing_relevancy#typo-tolerance):** get relevant matches even when queries contain typos and misspellings\n- **[Filtering](https://www.meilisearch.com/docs/learn/fine_tuning_results/filtering) and [faceted search](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search):** enhance your user's search experience with custom filters and build a faceted search interface in a few lines of code\n- **[Sorting](https://www.meilisearch.com/docs/learn/fine_tuning_results/sorting):** sort results based on price, date, or pretty much anything else your users need\n- **[Synonym support](https://www.meilisearch.com/docs/learn/getting_started/customizing_relevancy#synonyms):** configure synonyms to include more relevant content in your search results\n- **[Geosearch](https://www.meilisearch.com/docs/learn/fine_tuning_results/geosearch):** filter and sort documents based on geographic data\n- **[Extensive language support](https://www.meilisearch.com/docs/learn/what_is_meilisearch/language):** search datasets in any language, with optimized support for Chinese, Japanese, Hebrew, and languages using the Latin alphabet\n- **[Security management](https://www.meilisearch.com/docs/learn/security/master_api_keys):** control which users can access what data with API keys that allow fine-grained permissions handling\n- **[Multi-Tenancy](https://www.meilisearch.com/docs/learn/security/tenant_tokens):** personalize search results for any number of application tenants\n- **Highly Customizable:** customize Meilisearch to your specific needs or use our out-of-the-box and hassle-free presets\n- **[RESTful API](https://www.meilisearch.com/docs/reference/api/overview):** integrate Meilisearch in your technical stack with our plugins and SDKs\n- **Easy to install, deploy, and maintain**\n\n## \ud83d\udcd6 Documentation\n\nYou can consult Meilisearch's documentation at [https://www.meilisearch.com/docs](https://www.meilisearch.com/docs/).\n\n## \ud83d\ude80 Getting started\n\nFor basic instructions on how to set up Meilisearch, add documents to an index, and search for documents, take a look at our [Quick Start](https://www.meilisearch.com/docs/learn/getting_started/quick_start) guide.\n\nYou may also want to check out [Meilisearch 101](https://www.meilisearch.com/docs/learn/getting_started/filtering_and_sorting) for an introduction to some of Meilisearch's most popular features.\n\n## \u26a1 Supercharge your Meilisearch experience\n\nSay goodbye to server deployment and manual updates with [Meilisearch Cloud](https://www.meilisearch.com/pricing?utm_campaign=oss&utm_source=engine&utm_medium=meilisearch). Get started with a 14-day free trial! 
No credit card required.\n\n## \ud83e\uddf0 SDKs & integration tools\n\nInstall one of our SDKs in your project for seamless integration between Meilisearch and your favorite language or framework!\n\nTake a look at the complete [Meilisearch integration list](https://www.meilisearch.com/docs/learn/what_is_meilisearch/sdks).\n\n[![Logos belonging to different languages and frameworks supported by Meilisearch, including React, Ruby on Rails, Go, Rust, and PHP](assets/integrations.png)](https://www.meilisearch.com/docs/learn/what_is_meilisearch/sdks)\n\n## \u2699\ufe0f Advanced usage\n\nExperienced users will want to keep our [API Reference](https://www.meilisearch.com/docs/reference/api/overview) close at hand.\n\nWe also offer a wide range of dedicated guides to all Meilisearch features, such as [filtering](https://www.meilisearch.com/docs/learn/fine_tuning_results/filtering), [sorting](https://www.meilisearch.com/docs/learn/fine_tuning_results/sorting), [geosearch](https://www.meilisearch.com/docs/learn/fine_tuning_results/geosearch), [API keys](https://www.meilisearch.com/docs/learn/security/master_api_keys), and [tenant tokens](https://www.meilisearch.com/docs/learn/security/tenant_tokens).\n\nFinally, for more in-depth information, refer to our articles explaining fundamental Meilisearch concepts such as [documents](https://www.meilisearch.com/docs/learn/core_concepts/documents) and [indexes](https://www.meilisearch.com/docs/learn/core_concepts/indexes).\n\n## \ud83d\udcca Telemetry\n\nMeilisearch collects **anonymized** data from users to help us improve our product. You can [deactivate this](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry#how-to-disable-data-collection) whenever you want.\n\nTo request deletion of collected data, please write to us at\u00a0[privacy@meilisearch.com](mailto:privacy@meilisearch.com). Don't forget to include your `Instance UID` in the message, as this helps us quickly find and delete your data.\n\nIf you want to know more about the kind of data we collect and what we use it for, check the [telemetry section](https://www.meilisearch.com/docs/learn/what_is_meilisearch/telemetry) of our documentation.\n\n## \ud83d\udceb Get in touch!\n\nMeilisearch is a search engine created by [Meili](https://www.welcometothejungle.com/en/companies/meilisearch), a software development company based in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/)\n\n\ud83d\uddde [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.\n\n\ud83d\udc8c Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:\n\n- For feature requests, please visit our [product repository](https://github.com/meilisearch/product/discussions)\n- Found a bug? Open an [issue](https://github.com/meilisearch/meilisearch/issues)!\n- Want to be part of our Discord community? [Join us!](https://discord.gg/meilisearch)\n\nThank you for your support!\n\n## \ud83d\udc69\u200d\ud83d\udcbb Contributing\n\nMeilisearch is, and will always be, open-source! 
If you want to contribute to the project, please take a look at [our contribution guidelines](CONTRIBUTING.md).\n\n## \ud83d\udce6 Versioning\n\nMeilisearch releases and their associated binaries are available [in this GitHub page](https://github.com/meilisearch/meilisearch/releases).\n\nThe binaries are versioned following [SemVer conventions](https://semver.org/). To know more, read our [versioning policy](https://github.com/meilisearch/engine-team/blob/main/resources/versioning-policy.md).\n\nDifferently from the binaries, crates in this repository are not currently available on [crates.io](https://crates.io/) and do not follow [SemVer conventions](https://semver.org).\n", - "source_links": [], - "id": 77 - }, - { - "page_link": "https://github.com/metabase/metabase", - "title": "metabase readme", - "text": "# Metabase\n\n[Metabase](https://www.metabase.com) is the easy, open-source way for everyone in your company to ask questions and learn from data.\n\n![Metabase Product Screenshot](docs/images/metabase-product-screenshot.svg)\n\n[![Latest Release](https://img.shields.io/github/release/metabase/metabase.svg?label=latest%20release)](https://github.com/metabase/metabase/releases)\n[![Circle CI](https://circleci.com/gh/metabase/metabase.svg?style=svg&circle-token=3ccf0aa841028af027f2ac9e8df17ce603e90ef9)](https://circleci.com/gh/metabase/metabase)\n[![codecov](https://codecov.io/gh/metabase/metabase/branch/master/graph/badge.svg)](https://codecov.io/gh/metabase/metabase)\n![Docker Pulls](https://img.shields.io/docker/pulls/metabase/metabase)\n\n## Features\n\n- [Set up in five minutes](https://www.metabase.com/docs/latest/setting-up-metabase.html) (we're not kidding).\n- Let anyone on your team [ask questions](https://www.metabase.com/docs/latest/users-guide/04-asking-questions.html) without knowing SQL.\n- Use the [SQL editor](https://www.metabase.com/docs/latest/users-guide/writing-sql.html) for more complex queries.\n- Build handsome, interactive [dashboards](https://www.metabase.com/docs/latest/users-guide/07-dashboards.html) with filters, auto-refresh, fullscreen, and custom click behavior.\n- Create [models](https://www.metabase.com/learn/getting-started/models) that clean up, annotate, and/or combine raw tables.\n- Define canonical [segments and metrics](https://www.metabase.com/docs/latest/administration-guide/07-segments-and-metrics.html) for your team to use.\n- Send data to Slack or email on a schedule with [dashboard subscriptions](https://www.metabase.com/docs/latest/users-guide/dashboard-subscriptions).\n- Set up [alerts](https://www.metabase.com/docs/latest/users-guide/15-alerts.html) to have Metabase notify you when your data changes.\n- [Embed charts and dashboards](https://www.metabase.com/docs/latest/administration-guide/13-embedding.html) in your app, or even [your entire Metabase](https://www.metabase.com/docs/latest/enterprise-guide/full-app-embedding.html).\n\nTake a [tour of Metabase](https://www.metabase.com/learn/getting-started/tour-of-metabase).\n\n## Supported databases\n\n- [Officially supported databases](../../databases/connecting.md#connecting-to-supported-databases)\n- [Community-supported drivers](../partner-and-community-drivers.md#community-drivers)\n\n## Installation\n\nMetabase can be run just about anywhere. 
Check out our [Installation Guides](https://www.metabase.com/docs/latest/operations-guide/installing-metabase.html).\n\n## Contributing\n\nTo get started with a development installation of the Metabase, check out our [Developers Guide](https://www.metabase.com/docs/latest/developers-guide/start).\n\n## Internationalization\n\nWe want Metabase to be available in as many languages as possible. See which translations are available and help contribute to internationalization using our project over at [POEditor](https://poeditor.com/join/project/ynjQmwSsGh). You can also check out our [policies on translations](https://www.metabase.com/docs/latest/administration-guide/localization.html).\n\n## Extending Metabase\n\nMetabase also allows you to hit our Query API directly from Javascript to integrate the simple analytics we provide with your own application or third party services to do things like:\n\n- Build moderation interfaces.\n- Export subsets of your users to third party marketing automation software.\n- Provide a specialized customer lookup application for the people in your company.\n\nCheck out our guide, [Working with the Metabase API](https://www.metabase.com/learn/administration/metabase-api).\n\n## Security Disclosure\n\nSee [SECURITY.md](./SECURITY.md) for details.\n\n## License\n\nThis repository contains the source code for both the Open Source edition of Metabase, released under the AGPL, as well as the [commercial editions of Metabase](https://www.metabase.com/pricing), which are released under the Metabase Commercial Software License.\n\nSee [LICENSE.txt](./LICENSE.txt) for details.\n\nUnless otherwise noted, all files \u00a9 2022 Metabase, Inc.\n\n## [Metabase Experts](https://www.metabase.com/partners/)\n\nIf you\u2019d like more technical resources to set up your data stack with Metabase, connect with a [Metabase Expert](https://www.metabase.com/partners/?utm_source=readme&utm_medium=metabase-expetrs&utm_campaign=readme).\n", - "source_links": [], - "id": 78 - }, - { - "page_link": "https://github.com/grafana/mimir", - "title": "mimir readme", - "text": "# Grafana Mimir\n\n

\"Grafana

\n\nGrafana Mimir is an open source software project that provides a scalable long-term storage for [Prometheus](https://prometheus.io). Some of the core strengths of Grafana Mimir include:\n\n- **Easy to install and maintain:** Grafana Mimir\u2019s extensive documentation, tutorials, and deployment tooling make it quick to get started. Using its monolithic mode, you can get Grafana Mimir up and running with just one binary and no additional dependencies. Once deployed, the best-practice dashboards, alerts, and runbooks packaged with Grafana Mimir make it easy to monitor the health of the system.\n- **Massive scalability:** You can run Grafana Mimir's horizontally-scalable architecture across multiple machines, resulting in the ability to process orders of magnitude more time series than a single Prometheus instance. Internal testing shows that Grafana Mimir handles up to 1 billion active time series.\n- **Global view of metrics:** Grafana Mimir enables you to run queries that aggregate series from multiple Prometheus instances, giving you a global view of your systems. Its query engine extensively parallelizes query execution, so that even the highest-cardinality queries complete with blazing speed.\n- **Cheap, durable metric storage:** Grafana Mimir uses object storage for long-term data storage, allowing it to take advantage of this ubiquitous, cost-effective, high-durability technology. It is compatible with multiple object store implementations, including AWS S3, Google Cloud Storage, Azure Blob Storage, OpenStack Swift, as well as any S3-compatible object storage.\n- **High availability:** Grafana Mimir replicates incoming metrics, ensuring that no data is lost in the event of machine failure. Its horizontally scalable architecture also means that it can be restarted, upgraded, or downgraded with zero downtime, which means no interruptions to metrics ingestion or querying.\n- **Natively multi-tenant:** Grafana Mimir\u2019s multi-tenant architecture enables you to isolate data and queries from independent teams or business units, making it possible for these groups to share the same cluster. Advanced limits and quality-of-service controls ensure that capacity is shared fairly among tenants.\n\n## Migrating to Grafana Mimir\n\nIf you're migrating to Grafana Mimir, refer to the following documents:\n\n- [Migrating from Thanos or Prometheus to Grafana Mimir](https://grafana.com/docs/mimir/latest/migrate/migrating-from-thanos-or-prometheus/).\n- [Migrating from Cortex to Grafana Mimir](https://grafana.com/docs/mimir/latest/migrate/migrate-from-cortex/)\n\n## Deploying Grafana Mimir\n\nFor information about how to deploy Grafana Mimir, refer to [Deploy Grafana Mimir](https://grafana.com/docs/mimir/latest/operators-guide/deploy-grafana-mimir/).\n\n## Getting started\n\nIf you\u2019re new to Grafana Mimir, read the [Getting started guide](https://grafana.com/docs/mimir/latest/operators-guide/get-started/).\n\nBefore deploying Grafana Mimir in a production environment, read:\n\n1. [An overview of Grafana Mimir\u2019s architecture](https://grafana.com/docs/mimir/latest/operators-guide/architecture/)\n1. [Configure Grafana Mimir](https://grafana.com/docs/mimir/latest/operators-guide/configure/)\n1. 
[Run Grafana Mimir in production](https://grafana.com/docs/mimir/latest/operators-guide/run-production-environment/)\n\n## Documentation\n\nRefer to the following links to access Grafana Mimir documentation:\n\n- [Latest release](https://grafana.com/docs/mimir/latest/)\n- [Upcoming release](https://grafana.com/docs/mimir/next/), at the tip of the main branch\n\n## Contributing\n\nTo contribute to Grafana Mimir, refer to [Contributing to Grafana Mimir](https://github.com/grafana/mimir/tree/main/docs/internal/contributing).\n\n## Join the Grafana Mimir discussion\n\nIf you have any questions or feedback regarding Grafana Mimir, join the [Grafana Mimir Discussion](https://github.com/grafana/mimir/discussions). Alternatively, consider joining the monthly [Grafana Mimir Community Call](https://docs.google.com/document/d/1E4jJcGicvLTyMEY6cUFFZUg_I8ytrBuW8r5yt1LyMv4).\n\nYour feedback is always welcome, and you can also share it via the [`#mimir` Slack channel](https://slack.grafana.com/).\n\n## License\n\nGrafana Mimir is distributed under [AGPL-3.0-only](LICENSE).\n", - "source_links": [], - "id": 79 - }, - { - "page_link": "external-access.md", - "title": "external-access", - "text": "# Ship Metrics to Mimir from beyond this cluster\n\nMimir by default is deployed in a cluster local way. The simplest way to enable external ingress is to set your install to use basic auth, which can be done via editing your `context.yaml` file with:\n\n\n```yaml\nconfiguration:\n mimir:\n hostname: mimir. # you can find the configured domain in `workspace.yaml`\n basicAuth:\n user: \n password: \n```\nyou can use `plural crypto random` to generate a high-entropy password if that is helpful as well.\n\n\nOnce that file has been edited, you can run `plural build --only mimir && plural deploy --commit \"configure loki ingress\"` to update your loki install.\n\nWe have often seen people use remote prometheus writes to ship metrics from a prometheus scraper to this centralized mimir instance.\n\n## Connection Setup\n\nTo authenticate to your mimir instance from a remote metric shipper, you'll need to add two headers:\n\n```\nAuthentication: Basic b64(:)\nX-Scope-OrgID: \n```\n\nYou'll need to base64 encode the username:password pair, which can be done with `echo $user:$password | base64`. Since we set up mimir with multi-tenancy, you'll need to add an `X-Scope-OrgID` with a tenant header, which the default global tenant is just the name of your plural cluster found in `workspace.yaml`\n", - "source_links": [], - "id": 80 - }, - { - "page_link": null, - "title": "minecraft readme", - "text": null, - "source_links": [], - "id": 81 - }, - { - "page_link": "https://github.com/minio/minio", - "title": "minio readme", - "text": "# MinIO Quickstart Guide\n\n[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE)\n\n[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)\n\nMinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. 
Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.\n\nThis README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).\n\n## Container Installation\n\nUse the following commands to run a standalone MinIO server as a container.\n\nStandalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication\nrequire distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,\nwith a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)\nfor more complete documentation.\n\n### Stable\n\nRun the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:\n\n```sh\npodman run -p 9000:9000 -p 9001:9001 \\\n quay.io/minio/minio server /data --console-address \":9001\"\n```\n\nThe MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded\nobject browser built into MinIO Server. Point a web browser running on the host machine to and log in with the\nroot credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.\n\nYou can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See\n[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,\nsee to view MinIO SDKs for supported languages.\n\n> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.\n\n## macOS\n\nUse the following commands to run a standalone MinIO server on macOS.\n\nStandalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.\n\n### Homebrew (recommended)\n\nRun the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.\n\n```sh\nbrew install minio/stable/minio\nminio server /data\n```\n\n> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.\n\n```sh\nbrew uninstall minio\nbrew install minio/stable/minio\n```\n\nThe MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. 
Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.\n\nYou can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages.\n\n### Binary Download\n\nUse the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.\n\n```sh\nwget https://dl.min.io/server/minio/release/darwin-amd64/minio\nchmod +x minio\n./minio server /data\n```\n\nThe MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.\n\nYou can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages.\n\n## GNU/Linux\n\nUse the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.\n\n```sh\nwget https://dl.min.io/server/minio/release/linux-amd64/minio\nchmod +x minio\n./minio server /data\n```\n\nReplace ``/data`` with the path to the drive or directory in which you want MinIO to store data.\n\nThe following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.\n\n| Architecture | URL |\n| -------- | ------ |\n| 64-bit Intel/AMD | |\n| 64-bit ARM | |\n| 64-bit PowerPC LE (ppc64le) | |\n| IBM Z-Series (S390X) | |\n\nThe MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.\n\nYou can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages.\n\n> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. 
See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.\n\n## Microsoft Windows\n\nTo run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:\n\n```sh\nhttps://dl.min.io/server/minio/release/windows-amd64/minio.exe\n```\n\nUse the following command to run a standalone MinIO server on the Windows host. Replace ``D:\\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:\n\n```sh\nminio.exe server D:\\\n```\n\nThe MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.\n\nYou can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages.\n\n> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.\n\n## Install from Source\n\nUse the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.18](https://golang.org/dl/#stable)\n\n```sh\ngo install github.com/minio/minio@latest\n```\n\nThe MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.\n\nYou can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages.\n\n> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. 
See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.\n\nMinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.\n\n## Deployment Recommendations\n\n### Allow port access for Firewalls\n\nBy default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.\n\n### ufw\n\nFor hosts with ufw enabled (Debian based distros), you can use `ufw` command to allow traffic to specific ports. Use below command to allow access to port 9000\n\n```sh\nufw allow 9000\n```\n\nBelow command enables all incoming traffic to ports ranging from 9000 to 9010.\n\n```sh\nufw allow 9000:9010/tcp\n```\n\n### firewall-cmd\n\nFor hosts with firewall-cmd enabled (CentOS), you can use `firewall-cmd` command to allow traffic to specific ports. Use below commands to allow access to port 9000\n\n```sh\nfirewall-cmd --get-active-zones\n```\n\nThis command gets the active zone(s). Now, apply port rules to the relevant zones returned above. For example if the zone is `public`, use\n\n```sh\nfirewall-cmd --zone=public --add-port=9000/tcp --permanent\n```\n\nNote that `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect.\n\n```sh\nfirewall-cmd --reload\n```\n\n### iptables\n\nFor hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow\naccess to port 9000\n\n```sh\niptables -A INPUT -p tcp --dport 9000 -j ACCEPT\nservice iptables restart\n```\n\nBelow command enables all incoming traffic to ports ranging from 9000 to 9010.\n\n```sh\niptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT\nservice iptables restart\n```\n\n## Test MinIO Connectivity\n\n### Test using MinIO Console\n\nMinIO Server comes with an embedded web based object browser. Point your web browser to to ensure your server has started successfully.\n\n> NOTE: MinIO runs console on random port by default if you wish choose a specific port use `--console-address` to pick a specific interface and port.\n\n### Things to consider\n\nMinIO redirects browser access requests to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.\n\nFor deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.\n\nFor example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.\n\nSimilarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. 
Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.\n\nFor example: `export MINIO_SERVER_URL=\"https://minio.example.net\"`\n\n| Dashboard | Creating a bucket |\n| ------------- | ------------- |\n| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |\n\n## Test using MinIO Client `mc`\n\n`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.\n\n## Upgrading MinIO\n\nUpgrades require zero downtime in MinIO, all upgrades are non-disruptive, all transactions on MinIO are atomic. So upgrading all the servers simultaneously is the recommended way to upgrade MinIO.\n\n> NOTE: requires internet access to update directly from , optionally you can host any mirrors at \n\n- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)\n\n```sh\nmc admin update \n```\n\n- For deployments without external internet access (e.g. airgapped environments), download the binary from and replace the existing MinIO binary let's say for example `/opt/bin/minio`, apply executable permissions `chmod +x /opt/bin/minio` and proceed to perform `mc admin service restart alias/`.\n\n- For installations using Systemd MinIO service, upgrade via RPM/DEB packages **parallelly** on all servers or replace the binary lets say `/opt/bin/minio` on all nodes, apply executable permissions `chmod +x /opt/bin/minio` and process to perform `mc admin service restart alias/`.\n\n### Upgrade Checklist\n\n- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.\n- Read the release notes for MinIO *before* performing any upgrade, there is no forced requirement to upgrade to latest releases upon every releases. 
Some releases may not be relevant to your setup, avoid upgrading production environments unnecessarily.\n- If you plan to use `mc admin update`, MinIO process must have write access to the parent directory where the binary is present on the host system.\n- `mc admin update` is not supported and should be avoided in kubernetes/container environments, please upgrade containers by upgrading relevant container images.\n- **We do not recommend upgrading one MinIO server at a time, the product is designed to support parallel upgrades please follow our recommended guidelines.**\n\n## Explore Further\n\n- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)\n- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)\n- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)\n- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)\n\n## Contribute to MinIO Project\n\nPlease follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)\n\n## License\n\n- MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.\n- MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) \u00a9 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).\n- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)\n", - "source_links": [], - "id": 82 - }, - { - "page_link": "https://github.com/mlflow/mlflow", - "title": "mlflow readme", - "text": null, - "source_links": [], - "id": 83 - }, - { - "page_link": "https://github.com/mongodb/mongo", - "title": "mongodb readme", - "text": "# ![Logo](docs/leaf.svg) MongoDB README\n\nWelcome to MongoDB!\n\n## Components\n\n - `mongod` - The database server.\n - `mongos` - Sharding router.\n - `mongo` - The database shell (uses interactive javascript).\n\n## Utilities\n\n `install_compass` - Installs MongoDB Compass for your platform.\n\n## Building\n\n See [Building MongoDB](docs/building.md).\n\n## Running\n\n For command line options invoke:\n\n ```bash\n $ ./mongod --help\n ```\n\n To run a single server database:\n\n ```bash\n $ sudo mkdir -p /data/db\n $ ./mongod\n $\n $ # The mongo javascript shell connects to localhost and test database by default:\n $ ./mongo\n > help\n ```\n\n## Installing Compass\n\n You can install compass using the `install_compass` script packaged with MongoDB:\n\n ```bash\n $ ./install_compass\n ```\n\n This will download the appropriate MongoDB Compass package for your platform\n and install it.\n\n## Drivers\n\n Client drivers for most programming languages are available at\n https://docs.mongodb.com/manual/applications/drivers/. 
Use the shell\n (`mongo`) for administrative tasks.\n\n## Bug Reports\n\n See https://github.com/mongodb/mongo/wiki/Submit-Bug-Reports.\n\n## Packaging\n\n Packages are created dynamically by the [buildscripts/packager.py](buildscripts/packager.py) script.\n This will generate RPM and Debian packages.\n\n## Documentation\n\n https://docs.mongodb.com/manual/\n\n## Cloud Hosted MongoDB\n\n https://www.mongodb.com/cloud/atlas\n\n## Forums\n\n - https://community.mongodb.com\n\n Technical questions about using MongoDB.\n\n - https://community.mongodb.com/c/server-dev\n\n Technical questions about building and developing MongoDB.\n\n## Learn MongoDB\n\n https://university.mongodb.com/\n\n## LICENSE\n\n MongoDB is free and the source is available. Versions released prior to\n October 16, 2018 are published under the AGPL. All versions released after\n October 16, 2018, including patch fixes for prior versions, are published\n under the [Server Side Public License (SSPL) v1](LICENSE-Community.txt).\n See individual files for details.\n", - "source_links": [], - "id": 84 - }, - { - "page_link": "https://github.com/prometheus/prometheus", - "title": "monitoring readme", - "text": "# Prometheus\n\n[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/main.svg?style=shield)][circleci]\n[![Docker Repository on Quay](https://quay.io/repository/prometheus/prometheus/status)][quay]\n[![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]\n[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)\n[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)\n[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)\n\nVisit [prometheus.io](https://prometheus.io) for the full documentation,\nexamples and guides.\n\nPrometheus, a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. 
It collects metrics\nfrom configured targets at given intervals, evaluates rule expressions,\ndisplays the results, and can trigger alerts when specified conditions are observed.\n\nThe features that distinguish Prometheus from other metrics and monitoring systems are:\n\n* A **multi-dimensional** data model (time series defined by metric name and set of key/value dimensions)\n* PromQL, a **powerful and flexible query language** to leverage this dimensionality\n* No dependency on distributed storage; **single server nodes are autonomous**\n* An HTTP **pull model** for time series collection\n* **Pushing time series** is supported via an intermediary gateway for batch jobs\n* Targets are discovered via **service discovery** or **static configuration**\n* Multiple modes of **graphing and dashboarding support**\n* Support for hierarchical and horizontal **federation**\n\n## Architecture overview\n\n![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)\n\n## Install\n\nThere are various ways of installing Prometheus.\n\n### Precompiled binaries\n\nPrecompiled binaries for released versions are available in the\n[*download* section](https://prometheus.io/download/)\non [prometheus.io](https://prometheus.io). Using the latest production release binary\nis the recommended way of installing Prometheus.\nSee the [Installing](https://prometheus.io/docs/introduction/install/)\nchapter in the documentation for all the details.\n\n### Docker images\n\nDocker images are available on [Quay.io](https://quay.io/repository/prometheus/prometheus) or [Docker Hub](https://hub.docker.com/r/prom/prometheus/).\n\nYou can launch a Prometheus container for trying it out with\n\n```bash\ndocker run --name prometheus -d -p 127.0.0.1:9090:9090 prom/prometheus\n```\n\nPrometheus will now be reachable at .\n\n### Building from source\n\nTo build Prometheus from source code, You need:\n\n* Go [version 1.17 or greater](https://golang.org/doc/install).\n* NodeJS [version 16 or greater](https://nodejs.org/).\n* npm [version 7 or greater](https://www.npmjs.com/).\n\nStart by cloning the repository:\n\n```bash\ngit clone https://github.com/prometheus/prometheus.git\ncd prometheus\n```\n\nYou can use the `go` tool to build and install the `prometheus`\nand `promtool` binaries into your `GOPATH`:\n\n```bash\nGO111MODULE=on go install github.com/prometheus/prometheus/cmd/...\nprometheus --config.file=your_config.yml\n```\n\n*However*, when using `go install` to build Prometheus, Prometheus will expect to be able to\nread its web assets from local filesystem directories under `web/ui/static` and\n`web/ui/templates`. In order for these assets to be found, you will have to run Prometheus\nfrom the root of the cloned repository. 
Note also that these directories do not include the\nReact UI unless it has been built explicitly using `make assets` or `make build`.\n\nAn example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)\n\nYou can also build using `make build`, which will compile in the web assets so that\nPrometheus can be run from anywhere:\n\n```bash\nmake build\n./prometheus --config.file=your_config.yml\n```\n\nThe Makefile provides several targets:\n\n* *build*: build the `prometheus` and `promtool` binaries (includes building and compiling in web assets)\n* *test*: run the tests\n* *test-short*: run the short tests\n* *format*: format the source code\n* *vet*: check the source code for common errors\n* *assets*: build the React UI\n\n### Service discovery plugins\n\nPrometheus is bundled with many service discovery plugins.\nWhen building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)\nfile to disable some service discoveries. The file is a yaml-formated list of go\nimport path that will be built into the Prometheus binary.\n\nAfter you have changed the file, you\nneed to run `make build` again.\n\nIf you are using another method to compile Prometheus, `make plugins` will\ngenerate the plugins file accordingly.\n\nIf you add out-of-tree plugins, which we do not endorse at the moment,\nadditional steps might be needed to adjust the `go.mod` and `go.sum` files. As\nalways, be extra careful when loading third party code.\n\n### Building the Docker image\n\nThe `make docker` target is designed for use in our CI system.\nYou can build a docker image locally with the following commands:\n\n```bash\nmake promu\npromu crossbuild -p linux/amd64\nmake npm_licenses\nmake common-docker-amd64\n```\n\n*NB* if you are on a Mac, you will need [gnu-tar](https://formulae.brew.sh/formula/gnu-tar).\n\n## Using Prometheus as a Go Library\n\n### Remote Write\n\nWe are publishing our Remote Write protobuf independently at\n[buf.build](https://buf.build/prometheus/prometheus/assets).\n\nYou can use that as a library:\n\n```shell\ngo get go.buf.build/protocolbuffers/go/prometheus/prometheus\n```\n\nThis is experimental.\n\n### Prometheus code base\n\nIn order to comply with [go mod](https://go.dev/ref/mod#versions) rules,\nPrometheus release number do not exactly match Go module releases. For the\nPrometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.\n\nTherefore, a user that would want to use Prometheus v2.35.0 as a library could do:\n\n```shell\ngo get github.com/prometheus/prometheus@v0.35.0\n```\n\nThis solution makes it clear that we might break our internal Go APIs between\nminor user-facing releases, as [breaking changes are allowed in major version\nzero](https://semver.org/#spec-item-4).\n\n## React UI Development\n\nFor more information on building, running, and developing on the React-based UI, see the React app's [README.md](web/ui/README.md).\n\n## More information\n\n* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). 
Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.\n* You will find a CircleCI configuration in [`.circleci/config.yml`](.circleci/config.yml).\n* See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.\n\n## Contributing\n\nRefer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md)\n\n## License\n\nApache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE).\n\n[hub]: https://hub.docker.com/r/prom/prometheus/\n[circleci]: https://circleci.com/gh/prometheus/prometheus\n[quay]: https://quay.io/repository/prometheus/prometheus\n", - "source_links": [], - "id": 85 - }, - { - "page_link": "https://github.com/mysql/mysql-server", - "title": "mysql readme", - "text": null, - "source_links": [], - "id": 86 - }, - { - "page_link": "https://github.com/n8n-io/n8n", - "title": "n8n readme", - "text": "![n8n.io - Workflow Automation](https://user-images.githubusercontent.com/65276001/173571060-9f2f6d7b-bac0-43b6-bdb2-001da9694058.png)\n\n# n8n - Workflow automation tool\n\nn8n is an extendable workflow automation tool. With a [fair-code](http://faircode.io) distribution model, n8n\nwill always have visible source code, be available to self-host, and allow you to add your own custom\nfunctions, logic and apps. n8n's node-based approach makes it highly versatile, enabling you to connect\nanything to everything.\n\n![n8n.io - Screenshot](https://raw.githubusercontent.com/n8n-io/n8n/master/assets/n8n-screenshot.png)\n\n## Demo\n\n[:tv: A short video (< 4 min)](https://www.youtube.com/watch?v=RpjQTGKm-ok) that goes over key concepts of\ncreating workflows in n8n.\n\n## Available integrations\n\nn8n has 200+ different nodes to automate workflows. 
The list can be found on:\n[https://n8n.io/integrations](https://n8n.io/integrations)\n\n## Documentation\n\nThe official n8n documentation can be found under: [https://docs.n8n.io](https://docs.n8n.io)\n\nAdditional information and example workflows on the n8n.io website: [https://n8n.io](https://n8n.io)\n\nThe changelog can be found [here](https://docs.n8n.io/reference/changelog.html) and the list of breaking\nchanges [here](https://github.com/n8n-io/n8n/blob/master/packages/cli/BREAKING-CHANGES.md).\n\n## Usage\n\n- :books: Learn\n [how to **install** and **use** it from the command line](https://github.com/n8n-io/n8n/tree/master/packages/cli/README.md)\n- :whale: Learn\n [how to run n8n in **Docker**](https://github.com/n8n-io/n8n/tree/master/docker/images/n8n/README.md)\n\n## Start\n\nExecute: `npx n8n`\n\n## n8n cloud\n\nSign-up for an [n8n cloud](https://www.n8n.io/cloud/) account.\n\nWhile n8n cloud and n8n are the same in terms of features, n8 cloud provides certain conveniences such as:\n\n- Not having to set up and maintain your n8n instance\n- Managed OAuth for authentication\n- Easily upgrading to the newer n8n versions\n\n## Support\n\nIf you have problems or questions go to our forum, we will then try to help you asap:\n\n[https://community.n8n.io](https://community.n8n.io)\n\n## Jobs\n\nIf you are interested in working for n8n and so shape the future of the project check out our\n[job posts](https://apply.workable.com/n8n/)\n\n## What does n8n mean and how do you pronounce it?\n\n**Short answer:** It means \"nodemation\" and it is pronounced as n-eight-n.\n\n**Long answer:** \"I get that question quite often (more often than I expected) so I decided it is probably\nbest to answer it here. While looking for a good name for the project with a free domain I realized very\nquickly that all the good ones I could think of were already taken. So, in the end, I chose nodemation.\n'node-' in the sense that it uses a Node-View and that it uses Node.js and '-mation' for 'automation' which is\nwhat the project is supposed to help with. However, I did not like how long the name was and I could not\nimagine writing something that long every time in the CLI. That is when I then ended up on 'n8n'.\" - **Jan\nOberhauser, Founder and CEO, n8n.io**\n\n## Development setup\n\nHave you found a bug :bug: ? Or maybe you have a nice feature :sparkles: to contribute ? 
The\n[CONTRIBUTING guide](https://github.com/n8n-io/n8n/blob/master/CONTRIBUTING.md) will help you get your\ndevelopment environment ready in minutes.\n\n## License\n\nn8n is [fair-code](http://faircode.io) distributed under the\n[**Sustainable Use License**](https://github.com/n8n-io/n8n/blob/master/packages/cli/LICENSE.md) and the\n[**n8n Enterprise License**](https://github.com/n8n-io/n8n/blob/master/packages/cli/LICENSE_EE.md).\n\nAdditional information about the license model can be found in the\n[docs](https://docs.n8n.io/reference/license/).\n", - "source_links": [], - "id": 87 - }, - { - "page_link": "https://github.com/neo4j/neo4j", - "title": "neo4j readme", - "text": null, - "source_links": [], - "id": 88 - }, - { - "page_link": "https://github.com/nextcloud/server", - "title": "nextcloud readme", - "text": "# Nextcloud Server \u2601\n[![Scrutinizer Code Quality](https://scrutinizer-ci.com/g/nextcloud/server/badges/quality-score.png?b=master)](https://scrutinizer-ci.com/g/nextcloud/server/?branch=master)\n[![codecov](https://codecov.io/gh/nextcloud/server/branch/master/graph/badge.svg)](https://codecov.io/gh/nextcloud/server)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/209/badge)](https://bestpractices.coreinfrastructure.org/projects/209)\n\n**A safe home for all your data.**\n\n![](https://raw.githubusercontent.com/nextcloud/screenshots/master/files/Files%20Sharing.png)\n\n## Why is this so awesome? \ud83e\udd29\n\n* \ud83d\udcc1 **Access your Data** You can store your files, contacts, calendars, and more on a server of your choosing.\n* \ud83d\udd04 **Sync your Data** You keep your files, contacts, calendars, and more synchronized amongst your devices.\n* \ud83d\ude4c **Share your Data** \u2026by giving others access to the stuff you want them to see or to collaborate with.\n* \ud83d\ude80 **Expandable with hundreds of Apps** ...like [Calendar](https://github.com/nextcloud/calendar), [Contacts](https://github.com/nextcloud/contacts), [Mail](https://github.com/nextcloud/mail), [Video Chat](https://github.com/nextcloud/spreed) and all those you can discover in our [App Store](https://apps.nextcloud.com)\n* \ud83d\udd12 **Security** with our encryption mechanisms, [HackerOne bounty program](https://hackerone.com/nextcloud) and two-factor authentication.\n\nDo you want to learn more about how you can use Nextcloud to access, share and protect your files, calendars, contacts, communication & more at home and in your organization? [**Learn about all our Features**](https://nextcloud.com/athome/).\n\n## Get your Nextcloud \ud83d\ude9a\n\n- \u2611\ufe0f [**Simply sign up**](https://nextcloud.com/signup/) at one of our providers either through our website or through the apps directly.\n- \ud83d\udda5 [**Install** a server by yourself](https://nextcloud.com/install/#instructions-server) on your hardware or by using one of our ready to use **appliances**\n- \ud83d\udce6 Buy one of the [awesome **devices** coming with a preinstalled Nextcloud](https://nextcloud.com/devices/)\n- \ud83c\udfe2 Find a [service **provider**](https://nextcloud.com/providers/) who hosts Nextcloud for you or your company\n\nEnterprise? Public Sector or Education user? 
You may want to have a look into [**Nextcloud Enterprise**](https://nextcloud.com/enterprise/) provided by Nextcloud GmbH.\n\n## Get in touch \ud83d\udcac\n\n* [\ud83d\udccb Forum](https://help.nextcloud.com)\n* [\ud83d\udc65 Facebook](https://www.facebook.com/nextclouders)\n* [\ud83d\udc23 Twitter](https://twitter.com/Nextclouders)\n* [\ud83d\udc18 Mastodon](https://mastodon.xyz/@nextcloud)\n\nYou can also [get support for Nextcloud](https://nextcloud.com/support)!\n\n\n## Join the team \ud83d\udc6a\n\nThere are many ways to contribute, of which development is only one! Find out [how to get involved](https://nextcloud.com/contribute/), including as a translator, designer, tester, helping others, and much more! \ud83d\ude0d\n\n\n### Development setup \ud83d\udc69\u200d\ud83d\udcbb\n\n1. \ud83d\ude80 [Set up your local development environment](https://docs.nextcloud.com/server/latest/developer_manual/getting_started/devenv.html)\n2. \ud83d\udc1b [Pick a good first issue](https://github.com/nextcloud/server/labels/good%20first%20issue)\n3. \ud83d\udc69\u200d\ud83d\udd27 Create a branch and make your changes. Remember to sign off your commits using `git commit -sm \"Your commit message\"`\n4. \u2b06 Create a [pull request](https://opensource.guide/how-to-contribute/#opening-a-pull-request) and `@mention` the people from the issue to review\n5. \ud83d\udc4d Fix things that come up during a review\n6. \ud83c\udf89 Wait for it to get merged!\n\nThird-party components are handled as git submodules which have to be initialized first. So aside from the regular git checkout invoking `git submodule update --init` or a similar command is needed, for details see Git documentation.\n\nSeveral apps that are included by default in regular releases such as [First run wizard](https://github.com/nextcloud/firstrunwizard) or [Activity](https://github.com/nextcloud/activity) are missing in `master` and have to be installed manually by cloning them into the `apps` subfolder.\n\nOtherwise, git checkouts can be handled the same as release archives, by using the `stable*` branches. Note they should never be used on production systems.\n\n### Working with front-end code \ud83c\udfd7\n\n#### Building\n\nWe are moving more and more toward using Vue.js in the front-end, starting with Settings. For building the code on changes, use these terminal commands in the root folder:\n\n```bash\n# install dependencies\nmake dev-setup\n\n# build for development\nmake build-js\n\n# build for development and watch edits\nmake watch-js\n\n# build for production with minification\nmake build-js-production\n```\n\n#### Committing changes\n\n**When making changes, also commit the compiled files!**\n\nWe still use Handlebars templates in some places in Files and Settings. 
We will replace these step-by-step with Vue.js, but in the meantime, you need to compile them separately.\n\nIf you don\u2019t have Handlebars installed yet, you can do it with this terminal command:\n```bash\nsudo npm install -g handlebars\n```\n\nThen inside the root folder of your local Nextcloud development installation, run this command in the terminal every time you changed a `.handlebars` file to compile it:\n```bash\n./build/compile-handlebars-templates.sh\n```\n\nBefore checking in JS changes, make sure to also build for production:\n```bash\nmake build-js-production\n```\nThen add the compiled files for committing.\n\nTo save some time, to only rebuild for a specific app, use the following and replace the module with the app name:\n```bash\nMODULE=user_status make build-js-production\n```\n\nPlease note that if you used `make build-js` or `make watch-js` before, you'll notice that a lot of files were marked as changed, so might need to clear the workspace first.\n\n### Working with back-end code \ud83c\udfd7\n\nWhen changing back-end PHP code, in general, no additional steps are needed before checking in.\n\nHowever, if new files were created, you will need to run the following command to update the autoloader files:\n```bash\nbuild/autoloaderchecker.sh\n```\n\nAfter that, please also include the autoloader file changes in your commits.\n\n### Tools we use \ud83d\udee0\n\n- [\ud83d\udc40 BrowserStack](https://browserstack.com) for cross-browser testing\n- [\ud83c\udf0a WAVE](https://wave.webaim.org/extension/) for accessibility testing\n- [\ud83d\udea8 Lighthouse](https://developers.google.com/web/tools/lighthouse/) for testing performance, accessibility, and more\n\n\n## Contribution guidelines \ud83d\udcdc\n\nAll contributions to this repository from June 16, 2016, and onward are considered to be\nlicensed under the AGPLv3 or any later version.\n\nNextcloud doesn't require a CLA (Contributor License Agreement).\nThe copyright belongs to all the individual contributors. Therefore we recommend\nthat every contributor adds the following line to the header of a file if they\nchanged it substantially:\n\n```\n@copyright Copyright (c) , ()\n```\n\nPlease read the [Code of Conduct](https://nextcloud.com/community/code-of-conduct/). This document offers some guidance to ensure Nextcloud participants can cooperate effectively in a positive and inspiring atmosphere, and to explain how together we can strengthen and support each other.\n\nPlease review the [guidelines for contributing](.github/CONTRIBUTING.md) to this repository.\n\nMore information how to contribute: [https://nextcloud.com/contribute/](https://nextcloud.com/contribute/)\n", - "source_links": [], - "id": 89 - }, - { - "page_link": "https://github.com/nocodb/nocodb", - "title": "nocodb readme", - "text": "

The Open Source Airtable Alternative\n\nTurns any MySQL, PostgreSQL, SQL Server, SQLite & MariaDB into a smart-spreadsheet.\n\n[![Build Status](https://travis-ci.org/dwyl/esta.svg?branch=master)](https://travis-ci.com/github/NocoDB/NocoDB)\n[![Node version](https://img.shields.io/badge/node-%3E%3D%2014.18.0-brightgreen)](http://nodejs.org/download/)\n[![Conventional Commits](https://img.shields.io/badge/Conventional%20Commits-1.0.0-green.svg)](https://conventionalcommits.org)\n\nWebsite \u2022 Discord \u2022 Community \u2022 Twitter \u2022 Reddit \u2022 Documentation\n\n![All Views](https://user-images.githubusercontent.com/35857179/194825053-3aa3373d-3e0f-4b42-b3f1-42928332054a.gif)\n\n[Deploy button]\n\nTranslated READMEs: [Chinese](scripts/markdown/readme/languages/chinese.md) \u2022 [French](scripts/markdown/readme/languages/french.md) \u2022 [German](scripts/markdown/readme/languages/german.md) \u2022 [Spanish](scripts/markdown/readme/languages/spanish.md) \u2022 [Portuguese](scripts/markdown/readme/languages/portuguese.md) \u2022 [Italian](scripts/markdown/readme/languages/italian.md) \u2022 [Japanese](scripts/markdown/readme/languages/japanese.md) \u2022 [Korean](scripts/markdown/readme/languages/korean.md) \u2022 [Russian](scripts/markdown/readme/languages/russian.md) \u2022 See other languages \u00bb\n\n# Join Our Team\n
\n\n# Join Our Community\n\n\n\"\"\n\n\n\n[![Stargazers repo roster for @nocodb/nocodb](https://reporoster.com/stars/nocodb/nocodb)](https://github.com/nocodb/nocodb/stargazers)\n\n\n# Quick try\n\n## 1-Click Deploy to Heroku\n\nBefore doing so, make sure you have a Heroku account. By default, an add-on Heroku Postgres will be used as meta database. You can see the connection string defined in `DATABASE_URL` by navigating to Heroku App Settings and selecting Config Vars.\n\n\n \"Deploy\n\n\n
\n\n## NPX\n\nYou can run below command if you need an interactive configuration.\n\n```\nnpx create-nocodb-app\n```\n\n\n\n## Node Application\n\nWe provide a simple NodeJS Application for getting started.\n\n```bash\ngit clone https://github.com/nocodb/nocodb-seed\ncd nocodb-seed\nnpm install\nnpm start\n```\n\n## Docker \n\n```bash\n# for SQLite\ndocker run -d --name nocodb \\\n-v \"$(pwd)\"/nocodb:/usr/app/data/ \\\n-p 8080:8080 \\\nnocodb/nocodb:latest\n\n# for MySQL\ndocker run -d --name nocodb-mysql \\\n-v \"$(pwd)\"/nocodb:/usr/app/data/ \\\n-p 8080:8080 \\\n-e NC_DB=\"mysql2://host.docker.internal:3306?u=root&p=password&d=d1\" \\\n-e NC_AUTH_JWT_SECRET=\"569a1821-0a93-45e8-87ab-eb857f20a010\" \\\nnocodb/nocodb:latest\n\n# for PostgreSQL\ndocker run -d --name nocodb-postgres \\\n-v \"$(pwd)\"/nocodb:/usr/app/data/ \\\n-p 8080:8080 \\\n-e NC_DB=\"pg://host.docker.internal:5432?u=root&p=password&d=d1\" \\\n-e NC_AUTH_JWT_SECRET=\"569a1821-0a93-45e8-87ab-eb857f20a010\" \\\nnocodb/nocodb:latest\n\n# for MSSQL\ndocker run -d --name nocodb-mssql \\\n-v \"$(pwd)\"/nocodb:/usr/app/data/ \\\n-p 8080:8080 \\\n-e NC_DB=\"mssql://host.docker.internal:1433?u=root&p=password&d=d1\" \\\n-e NC_AUTH_JWT_SECRET=\"569a1821-0a93-45e8-87ab-eb857f20a010\" \\\nnocodb/nocodb:latest\n```\n\n> To persist data in docker you can mount volume at `/usr/app/data/` since 0.10.6. Otherwise your data will be lost after recreating the container.\n\n> If you plan to input some special characters, you may need to change the character set and collation yourself when creating the database. Please check out the examples for [MySQL Docker](https://github.com/nocodb/nocodb/issues/1340#issuecomment-1049481043).\n\n## Binaries\n##### MacOS (x64)\n```bash\ncurl http://get.nocodb.com/macos-x64 -o nocodb -L && chmod +x nocodb && ./nocodb\n```\n\n##### MacOS (arm64)\n```bash\ncurl http://get.nocodb.com/macos-arm64 -o nocodb -L && chmod +x nocodb && ./nocodb\n```\n\n##### Linux (x64)\n```bash\ncurl http://get.nocodb.com/linux-x64 -o nocodb -L && chmod +x nocodb && ./nocodb\n```\n##### Linux (arm64)\n```bash\ncurl http://get.nocodb.com/linux-arm64 -o nocodb -L && chmod +x nocodb && ./nocodb\n```\n\n##### Windows (x64)\n```bash\niwr http://get.nocodb.com/win-x64.exe\n.\\Noco-win-x64.exe\n```\n\n##### Windows (arm64)\n```bash\niwr http://get.nocodb.com/win-arm64.exe\n.\\Noco-win-arm64.exe\n```\n\n## Docker Compose\n\nWe provide different docker-compose.yml files under [this directory](https://github.com/nocodb/nocodb/tree/master/docker-compose). Here are some examples.\n\n```bash\ngit clone https://github.com/nocodb/nocodb\n# for MySQL\ncd nocodb/docker-compose/mysql\n# for PostgreSQL\ncd nocodb/docker-compose/pg\n# for MSSQL\ncd nocodb/docker-compose/mssql\ndocker-compose up -d\n```\n\n> To persist data in docker, you can mount volume at `/usr/app/data/` since 0.10.6. Otherwise your data will be lost after recreating the container.\n\n> If you plan to input some special characters, you may need to change the character set and collation yourself when creating the database. 
Please check out the examples for [MySQL Docker Compose](https://github.com/nocodb/nocodb/issues/1313#issuecomment-1046625974).\n\n# GUI\n\nAccess Dashboard using : [http://localhost:8080/dashboard](http://localhost:8080/dashboard)\n\n# Screenshots\n\n![1](https://user-images.githubusercontent.com/35857179/194844858-d353bd15-1edf-406c-889b-ba60f76831f4.png)\n![2](https://user-images.githubusercontent.com/35857179/194844872-1a1094b9-761b-4ab6-a0ab-8e11dcae6571.png)\n![3](https://user-images.githubusercontent.com/35857179/194844881-23f12c4c-7a5f-403e-928c-ef4c53b2665d.png)\n![4](https://user-images.githubusercontent.com/35857179/194844885-faaf042f-bad2-4924-84f0-2c08813271d8.png)\n![5](https://user-images.githubusercontent.com/35857179/194844886-a17006e0-979d-493f-83c4-0e72f5a9b716.png)\n![6](https://user-images.githubusercontent.com/35857179/194844890-b9f265ae-6e40-4fa5-9267-d1367c27c8e6.png)\n![7](https://user-images.githubusercontent.com/35857179/194844891-bee9aea3-aff3-4247-a918-b2f3fbbc672e.png)\n![8](https://user-images.githubusercontent.com/35857179/194844893-82d5e21b-ae61-41bd-9990-31ad659bf490.png)\n![9](https://user-images.githubusercontent.com/35857179/194844897-cfd79946-e413-4c97-b16d-eb4d7678bb79.png)\n![10](https://user-images.githubusercontent.com/35857179/194844902-c0122570-0dd5-41cf-a26f-6f8d71fefc99.png)\n![11](https://user-images.githubusercontent.com/35857179/194844903-c1e47f40-e782-4f5d-8dce-6449cc70b181.png)\n![12](https://user-images.githubusercontent.com/35857179/194844907-09277d3e-cbbf-465c-9165-6afc4161e279.png)\n\n# Table of Contents\n\n- [Quick try](#quick-try)\n * [1-Click Deploy to Heroku](#1-click-deploy-to-heroku)\n * [NPX](#npx)\n * [Node Application](#node-application)\n * [Docker](#docker)\n * [Docker Compose](#docker-compose)\n- [GUI](#gui)\n- [Join Our Community](#join-our-community)\n- [Screenshots](#screenshots)\n- [Table of Contents](#table-of-contents)\n- [Features](#features)\n + [Rich Spreadsheet Interface](#rich-spreadsheet-interface)\n + [App Store for Workflow Automations](#app-store-for-workflow-automations)\n + [Programmatic Access](#programmatic-access)\n + [Sync Schema](#sync-schema)\n + [Audit](#audit)\n- [Production Setup](#production-setup)\n * [Environment variables](#environment-variables)\n- [Development Setup](#development-setup)\n- [Contributing](#contributing)\n- [Why are we building this?](#why-are-we-building-this)\n- [Our Mission](#our-mission)\n- [License](#license)\n- [Contributors](#contributors)\n\n# Features\n\n### Rich Spreadsheet Interface\n\n- \u26a1  Basic Operations: Create, Read, Update and Delete on Tables, Columns, and Rows\n- \u26a1  Fields Operations: Sort, Filter, Hide / Unhide Columns\n- \u26a1  Multiple Views Types: Grid (By default), Gallery, Form View and Kanban View\n- \u26a1  View Permissions Types: Collaborative Views, & Locked Views \n- \u26a1  Share Bases / Views: either Public or Private (with Password Protected)\n- \u26a1  Variant Cell Types: ID, LinkToAnotherRecord, Lookup, Rollup, SingleLineText, Attachement, Currency, Formula and etc\n- \u26a1  Access Control with Roles : Fine-grained Access Control at different levels\n- \u26a1  and more ...\n\n### App Store for Workflow Automations\n\nWe provide different integrations in three main categories. 
See App Store for details.\n\n- \u26a1  Chat : Slack, Discord, Mattermost, and etc\n- \u26a1  Email : AWS SES, SMTP, MailerSend, and etc\n- \u26a1  Storage : AWS S3, Google Cloud Storage, Minio, and etc\n\n### Programmatic Access\n\nWe provide the following ways to let users to invoke actions in a programmatic way. You can use a token (either JWT or Social Auth) to sign your requests for authorization to NocoDB. \n\n- \u26a1  REST APIs\n- \u26a1  NocoDB SDK\n\n### Sync Schema\n\nWe allow you to sync schema changes if you have made changes outside NocoDB GUI. However, it has to be noted then you will have to bring your own schema migrations for moving from environment to others. See Sync Schema for details.\n\n### Audit \n\nWe are keeping all the user operation logs under one place. See Audit for details.\n\n# Production Setup \n\nBy default, SQLite is used for storing meta data. However, you can specify your own database. The connection params for this database can be specified in `NC_DB` environment variable. Moreover, we also provide the below environment variables for configuration.\n\n## Environment variables \n\nPlease refer to [Environment variables](https://docs.nocodb.com/getting-started/installation#environment-variables)\n\n# Development Setup \n\nPlease refer to [Development Setup](https://docs.nocodb.com/engineering/development-setup)\n\n# Contributing\n\nPlease refer to [Contribution Guide](https://github.com/nocodb/nocodb/blob/master/.github/CONTRIBUTING.md).\n\n# Why are we building this?\nMost internet businesses equip themselves with either spreadsheet or a database to solve their business needs. Spreadsheets are used by a Billion+ humans collaboratively every single day. However, we are way off working at similar speeds on databases which are way more powerful tools when it comes to computing. Attempts to solve this with SaaS offerings has meant horrible access controls, vendor lockin, data lockin, abrupt price changes & most importantly a glass ceiling on what's possible in future.\n\n# Our Mission\nOur mission is to provide the most powerful no-code interface for databases which is open source to every single internet business in the world. This would not only democratise access to a powerful computing tool but also bring forth a billion+ people who will have radical tinkering-and-building abilities on internet. \n\n# License \n

\nThis project is licensed under AGPLv3.\n

\n\n# Contributors\n\nThank you for your contributions! We appreciate all the contributions from the community.\n\n\n \n", - "source_links": [], - "id": 90 - }, - { - "page_link": "https://github.com/NVIDIA/gpu-operator", - "title": "nvidia-operator readme", - "text": "[![license](https://img.shields.io/github/license/NVIDIA/gpu-operator?style=flat-square)](https://raw.githubusercontent.com/NVIDIA/gpu-operator/master/LICENSE)\n[![pipeline status](https://gitlab.com/nvidia/kubernetes/gpu-operator/badges/master/pipeline.svg)](https://gitlab.com/nvidia/kubernetes/gpu-operator/-/pipelines)\n[![coverage report](https://gitlab.com/nvidia/kubernetes/gpu-operator/badges/master/coverage.svg)](https://gitlab.com/nvidia/kubernetes/gpu-operator/-/pipelines)\n\n# NVIDIA GPU Operator\n\n![nvidia-gpu-operator](https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/egx/nvidia-egx-platform-gold-image-full-2c50-d@2x.jpg)\n\nKubernetes provides access to special hardware resources such as NVIDIA GPUs, NICs, Infiniband adapters and other devices through the [device plugin framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). However, configuring and managing nodes with these hardware resources requires configuration of multiple software components such as drivers, container runtimes or other libraries which are difficult and prone to errors.\nThe NVIDIA GPU Operator uses the [operator framework](https://cloud.redhat.com/blog/introducing-the-operator-framework) within Kubernetes to automate the management of all NVIDIA software components needed to provision GPU. These components include the NVIDIA drivers (to enable CUDA), Kubernetes device plugin for GPUs, the NVIDIA Container Runtime, automatic node labelling, [DCGM](https://developer.nvidia.com/dcgm) based monitoring and others.\n\n## Audience and Use-Cases\nThe GPU Operator allows administrators of Kubernetes clusters to manage GPU nodes just like CPU nodes in the cluster. Instead of provisioning a special OS image for GPU nodes, administrators can rely on a standard OS image for both CPU and GPU nodes and then rely on the GPU Operator to provision the required software components for GPUs.\n\nNote that the GPU Operator is specifically useful for scenarios where the Kubernetes cluster needs to scale quickly - for example provisioning additional GPU nodes on the cloud or on-prem and managing the lifecycle of the underlying software components. Since the GPU Operator runs everything as containers including NVIDIA drivers, the administrators can easily swap various components - simply by starting or stopping containers.\n\n## Product Documentation\nFor information on platform support and getting started, visit the official documentation [repository](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/overview.html).\n\n## Webinar\n[How to easily use GPUs on Kubernetes](https://info.nvidia.com/how-to-use-gpus-on-kubernetes-webinar.html)\n\n## Contributions\n[Read the document on contributions](https://github.com/NVIDIA/gpu-operator/blob/master/CONTRIBUTING.md). You can contribute by opening a [pull request](https://help.github.com/en/articles/about-pull-requests).\n\n## Support and Getting Help\nPlease open [an issue on the GitHub project](https://github.com/NVIDIA/gpu-operator/issues/new) for any questions. 
Your feedback is appreciated.\n", - "source_links": [], - "id": 91 - }, - { - "page_link": "https://github.com/oauth2-proxy/oauth2-proxy", - "title": "oauth2-proxy readme", - "text": "![OAuth2 Proxy](/docs/static/img/logos/OAuth2_Proxy_horizontal.svg)\n\n[![Build Status](https://secure.travis-ci.org/oauth2-proxy/oauth2-proxy.svg?branch=master)](http://travis-ci.org/oauth2-proxy/oauth2-proxy)\n[![Go Report Card](https://goreportcard.com/badge/github.com/oauth2-proxy/oauth2-proxy)](https://goreportcard.com/report/github.com/oauth2-proxy/oauth2-proxy)\n[![GoDoc](https://godoc.org/github.com/oauth2-proxy/oauth2-proxy?status.svg)](https://godoc.org/github.com/oauth2-proxy/oauth2-proxy)\n[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE)\n[![Maintainability](https://api.codeclimate.com/v1/badges/a58ff79407212e2beacb/maintainability)](https://codeclimate.com/github/oauth2-proxy/oauth2-proxy/maintainability)\n[![Test Coverage](https://api.codeclimate.com/v1/badges/a58ff79407212e2beacb/test_coverage)](https://codeclimate.com/github/oauth2-proxy/oauth2-proxy/test_coverage)\n\nA reverse proxy and static file server that provides authentication using Providers (Google, GitHub, and others)\nto validate accounts by email, domain or group.\n\n**Note:** This repository was forked from [bitly/OAuth2_Proxy](https://github.com/bitly/oauth2_proxy) on 27/11/2018.\nVersions v3.0.0 and up are from this fork and will have diverged from any changes in the original fork.\nA list of changes can be seen in the [CHANGELOG](CHANGELOG.md).\n\n**Note:** This project was formerly hosted as `pusher/oauth2_proxy` but has been renamed as of 29/03/2020 to `oauth2-proxy/oauth2-proxy`.\nGoing forward, all images shall be available at `quay.io/oauth2-proxy/oauth2-proxy` and binaries will be named `oauth2-proxy`.\n\n![Sign In Page](https://cloud.githubusercontent.com/assets/45028/4970624/7feb7dd8-6886-11e4-93e0-c9904af44ea8.png)\n\n## Installation\n\n1. Choose how to deploy:\n\n a. Download [Prebuilt Binary](https://github.com/oauth2-proxy/oauth2-proxy/releases) (current release is `v7.3.0`)\n\n b. Build with `$ go get github.com/oauth2-proxy/oauth2-proxy/v7` which will put the binary in `$GOROOT/bin`\n\n c. Using the prebuilt docker image [quay.io/oauth2-proxy/oauth2-proxy](https://quay.io/oauth2-proxy/oauth2-proxy) (AMD64, PPC64LE, ARMv6, ARMv8 and ARM64 available)\n\n Prebuilt binaries can be validated by extracting the file and verifying it against the `sha256sum.txt` checksum file provided for each release starting with version `v3.0.0`.\n\n ```\n sha256sum -c sha256sum.txt 2>&1 | grep OK\n oauth2-proxy-x.y.z.linux-amd64: OK\n ```\n\n2. [Select a Provider and Register an OAuth Application with a Provider](https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/oauth_provider)\n3. [Configure OAuth2 Proxy using config file, command line options, or environment variables](https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview)\n4. 
[Configure SSL or Deploy behind a SSL endpoint](https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/tls) (example provided for Nginx)\n\n\n## Security\n\nIf you are running a version older than v6.0.0 we **strongly recommend you please update** to a current version.\nSee [open redirect vulnerability](https://github.com/oauth2-proxy/oauth2-proxy/security/advisories/GHSA-5m6c-jp6f-2vcv) for details.\n\n## Docs\n\nRead the docs on our [Docs site](https://oauth2-proxy.github.io/oauth2-proxy/docs/).\n\n![OAuth2 Proxy Architecture](https://cloud.githubusercontent.com/assets/45028/8027702/bd040b7a-0d6a-11e5-85b9-f8d953d04f39.png)\n\n## Getting Involved\n\nIf you would like to reach out to the maintainers, come talk to us in the `#oauth2-proxy` channel in the [Gophers slack](http://gophers.slack.com/).\n\n## Contributing\n\nPlease see our [Contributing](CONTRIBUTING.md) guidelines. For releasing see our [release creation guide](RELEASE.md).\n", - "source_links": [], - "id": 92 - }, - { - "page_link": "https://github.com/grafana/oncall", - "title": "oncall readme", - "text": "\n\n[![Latest Release](https://img.shields.io/github/v/release/grafana/oncall?display_name=tag&sort=semver)](https://github.com/grafana/oncall/releases)\n[![License](https://img.shields.io/github/license/grafana/oncall)](https://github.com/grafana/oncall/blob/dev/LICENSE)\n[![Docker Pulls](https://img.shields.io/docker/pulls/grafana/oncall)](https://hub.docker.com/r/grafana/oncall/tags)\n[![Slack](https://img.shields.io/badge/join%20slack-%23grafana-%2Doncall-brightgreen.svg)](https://slack.grafana.com/)\n[![Discussion](https://img.shields.io/badge/discuss-oncall%20forum-orange.svg)](https://github.com/grafana/oncall/discussions)\n[![Build Status](https://drone.grafana.net/api/badges/grafana/oncall/status.svg?ref=refs/heads/dev)](https://drone.grafana.net/grafana/oncall)\n\nDeveloper-friendly incident response with brilliant Slack integration.\n\n\n\n- Collect and analyze alerts from multiple monitoring systems\n- On-call rotations based on schedules\n- Automatic escalations\n- Phone calls, SMS, Slack, Telegram notifications\n\n## Getting Started\n\nWe prepared multiple environments: [production](https://grafana.com/docs/grafana-cloud/oncall/open-source/#production-environment), [developer](DEVELOPER.md) and hobby:\n\n1. Download docker-compose.yaml:\n\n```bash\ncurl -fsSL https://raw.githubusercontent.com/grafana/oncall/dev/docker-compose.yml -o docker-compose.yml\n```\n\n2. Set variables:\n\n```bash\necho \"DOMAIN=http://localhost:8080\nCOMPOSE_PROFILES=with_grafana # Remove this line if you want to use existing grafana\nSECRET_KEY=my_random_secret_must_be_more_than_32_characters_long\nRABBITMQ_PASSWORD=rabbitmq_secret_pw\nMYSQL_PASSWORD=mysql_secret_pw\" > .env\n```\n\n3. Launch services:\n\n```bash\ndocker-compose up -d\n```\n\n4. Issue one-time invite token:\n\n```bash\ndocker-compose run engine python manage.py issue_invite_for_the_frontend --override\n```\n\n**Note**: if you remove the plugin configuration and reconfigure it, you will need to generate a new one-time invite token for your new configuration.\n\n5. Go to [OnCall Plugin Configuration](http://localhost:3000/plugins/grafana-oncall-app), using log in credentials as defined above: `admin`/`admin` (or find OnCall plugin in configuration->plugins) and connect OnCall _plugin_ with OnCall _backend_:\n\n```\nInvite token: ^^^ from the previous step.\nOnCall backend URL: http://engine:8080\nGrafana Url: http://grafana:3000\n```\n\n6. Enjoy! 
Check our [OSS docs](https://grafana.com/docs/grafana-cloud/oncall/open-source/) if you want to set up Slack, Telegram, Twilio or SMS/calls through Grafana Cloud.\n\n## Update version\n\nTo update your Grafana OnCall hobby environment:\n\n```shell\n# Update Docker image\ndocker-compose pull engine\n\n# Re-deploy\ndocker-compose up -d\n```\n\nAfter updating the engine, you'll also need to click the \"Update\" button on the [plugin version page](http://localhost:3000/plugins/grafana-oncall-app?page=version-history).\nSee [Grafana docs](https://grafana.com/docs/grafana/latest/administration/plugin-management/#update-a-plugin) for more info on updating Grafana plugins.\n\n## Join community\n\n\n\n\n\n## Stargazers over time\n\n[![Stargazers over time](https://starchart.cc/grafana/oncall.svg)](https://starchart.cc/grafana/oncall)\n\n## Further Reading\n\n- _Migration from the PagerDuty_ - [Migrator](https://github.com/grafana/oncall/tree/dev/tools/pagerduty-migrator)\n- _Documentation_ - [Grafana OnCall](https://grafana.com/docs/grafana-cloud/oncall/)\n- _Blog Post_ - [Announcing Grafana OnCall, the easiest way to do on-call management](https://grafana.com/blog/2021/11/09/announcing-grafana-oncall/)\n- _Presentation_ - [Deep dive into the Grafana, Prometheus, and Alertmanager stack for alerting and on-call management](https://grafana.com/go/observabilitycon/2021/alerting/?pg=blog)\n", - "source_links": [], - "id": 93 - }, - { - "page_link": null, - "title": "openmetadata readme", - "text": null, - "source_links": [], - "id": 94 - }, - { - "page_link": "https://github.com/pluralsh/plural", - "title": "plural readme", - "text": "

\n \n

\n\n\n

\n The fastest way to build great infrastructure\n

\n\n

\n Plural empowers you to build and maintain cloud-native and production-ready open source infrastructure on Kubernetes.\n

\n\n

\n \ud83d\ude80\ud83d\udd28\u2601\ufe0f\n

\n\n

\n \n
\n

\n\n


\n\n\n## \u2728 Features\n\nPlural will deploy open source applications on Kubernetes in your cloud using common standards like Helm and Terraform.\n\nThe Plural platform provides the following:\n\n* Dependency management between Terraform/Helm modules, with dependency-aware deployment and upgrades.\n* Authenticated docker registry and chartmuseum proxy per repository.\n* Secret encryption using AES-256 (so you can keep the entire workflow in git).\n\nIn addition, Plural also handles:\n* Issuing the certificates.\n* Configuring a DNS service to register fully-qualified domains under onplural.sh to eliminate the hassle of DNS registration for users.\n* Being an OIDC provider to enable zero touch login security for all Plural applications.\n\nWe think it's pretty cool! \ud83d\ude0e Some other nice things:\n\n### \u2601\ufe0f Build and manage open cloud-native architectures\n\n

\n
\n \n

\n

\n\nThe plural platform ingests all deployment artifacts needed to deploy cloud-native applications and tracks their dependencies, allowing for easy installs and no-touch upgrades post-install.\n\n### \ud83e\udd16 Day-2 Operational Toolkit\n\n

\n \n
\n

\n\nScale deploys with operational run-books for key cluster operations. Every dependency is automatically upgraded in the correct order, in a rolling manner. Plural provides a timestamped audit trail for all cluster applications along with searchable, downloadable logs. In addition, there are also pre-packaged dashboards for the highest importance metrics.\n\n### \ud83d\udd13 Secure by default\nPlural performs regular security scans for application images, helm charts, and terraform modules and comes equipped with OpenID connect for user auth to applications deployed by Plural.\n\n### \ud83e\udd73 Open source and extensible\nAll Plural applications are fully customizable and able to be ejected at any time. New applications can be wrapped and packaged onto Plural easily, giving you total freedom about how, when and where to use Plural.\n

\n\n\n## \ud83d\udcfd Check out a video Demo\n\nhttps://user-images.githubusercontent.com/28541758/164427949-3f14cfbb-cf5e-40dc-8996-385691ec2f01.mp4\n\n

\n\n\n## \ud83c\udfc1 Getting Started\n\n1. Go to https://app.plural.sh to create an account.
\n*Note: This is simply to track your installations and allow for the delivery of automated upgrades; you will not be asked to provide any infrastructure credentials or sensitive information.*\n2. [Install the Plural CLI](https://docs.plural.sh/getting-started/getting-started#install-plural-cli)\n3. [Create and initialize a new git repo](https://docs.plural.sh/getting-started/getting-started#create-your-plural-repo) to store your Plural installation.\n4. [Install, build and deploy applications](https://docs.plural.sh/getting-started/getting-started#install-plural-applications) from the Plural marketplace.\n5. [Install the Plural Management Console](https://docs.plural.sh/basic-setup-and-deployment/admin-console).\n\nYou should now have a fully functioning Plural environment with apps and the management console running. For more details, check out the rest of the docs below.\n\n### The Plural Workflow\n\nThe workflow is literally two commands:\n\n```bash\nplural build\nplural deploy\n```\n\nOur tooling will take care of the rest.\n

\n\n\n\n## \ud83d\udcda Documentation\n\nThe full documentation is available on our [Documentation site](https://docs.plural.sh/).\n

\n\n\n\n## \ud83d\udcac Community\n\nFor general help, please refer to the Plural documentation. For additional help you can use the following channels:\n\n* [Discord](https://discord.gg/pluralsh) (For live discussions with the Plural team).\n* [GitHub](https://github.com/pluralsh/plural/) (Bug reports, feature requests, contributions).\n* [Twitter](https://twitter.com/plural_sh) (For our latest news).\n\nPlural is dedicated to providing a welcoming, diverse, and harassment-free experience for everyone. We expect everyone in the community to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). *Please read it.*\n

\n\n\n## \ud83d\ude97 Roadmap\nSee what we're working on in these GitHub projects. Help us prioritize issues by reacting with an emoji on the issue!\n* Application Onboarding Roadmap: https://github.com/orgs/pluralsh/projects/2/views/2\n* Plural Core Issues: https://github.com/pluralsh/plural/issues\n* Plural CLI Issues: https://github.com/pluralsh/plural-cli/issues\n

\n\n\n\n## \ud83d\ude4c Contributing to Plural\n\nWe love contributions to Plural, big or small! To learn more about the repo and the architecture, see our [Contribution Guide](CONTRIBUTING.md).
\n\nIf you're not sure where to start, or if you have any questions, please open a draft PR or visit our [Discord](https://discord.gg/pluralsh) server where the core team can help answer your questions.\n

\n\n\n\n## \ud83d\udcdd License\n\nSee [LICENSE](LICENSE) for licensing information. If there are any questions on the license please visit our [Discord](https://discord.gg/pluralsh).\n\n## Thanks to all the contributors \u2764\n \n \n \n", - "source_links": [], - "id": 95 - }, - { - "page_link": "https://github.com/zalando/postgres-operator", - "title": "postgres readme", - "text": "# Postgres Operator\n\n![Tests](https://github.com/zalando/postgres-operator/workflows/operator-tests/badge.svg)\n![E2E Tests](https://github.com/zalando/postgres-operator/workflows/operator-e2e-tests/badge.svg)\n[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg?branch=master)](https://coveralls.io/github/zalando/postgres-operator?branch=master)\n\n\n\nThe Postgres Operator delivers an easy to run highly-available [PostgreSQL](https://www.postgresql.org/)\nclusters on Kubernetes (K8s) powered by [Patroni](https://github.com/zalando/patroni).\nIt is configured only through Postgres manifests (CRDs) to ease integration into automated CI/CD\npipelines with no access to Kubernetes API directly, promoting infrastructure as code vs manual operations.\n\n### Operator features\n\n* Rolling updates on Postgres cluster changes, incl. quick minor version updates\n* Live volume resize without pod restarts (AWS EBS, PVC)\n* Database connection pooling with PGBouncer\n* Support fast in place major version upgrade. Supports global upgrade of all clusters.\n* Restore and cloning Postgres clusters on AWS, GCS and Azure\n* Additionally logical backups to S3 or GCS bucket can be configured\n* Standby cluster from S3 or GCS WAL archive\n* Configurable for non-cloud environments\n* Basic credential and user management on K8s, eases application deployments\n* Support for custom TLS certificates\n* UI to create and edit Postgres cluster manifests\n* Support for AWS EBS gp2 to gp3 migration, supporting iops and throughput configuration\n* Compatible with OpenShift\n\n### PostgreSQL features\n\n* Supports PostgreSQL 15, starting from 10+\n* Streaming replication cluster via Patroni\n* Point-In-Time-Recovery with\n[pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) /\n[WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)\n* Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),\n[pg_stat_statements](https://www.postgresql.org/docs/15/pgstatstatements.html),\n[pgextwlist](https://github.com/dimitri/pgextwlist),\n[pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)\n* Incl. 
popular Postgres extensions such as\n[decoderbufs](https://github.com/debezium/postgres-decoderbufs),\n[hypopg](https://github.com/HypoPG/hypopg),\n[pg_cron](https://github.com/citusdata/pg_cron),\n[pg_partman](https://github.com/pgpartman/pg_partman),\n[pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache),\n[pgq](https://github.com/pgq/pgq),\n[plpgsql_check](https://github.com/okbob/plpgsql_check),\n[postgis](https://postgis.net/),\n[set_user](https://github.com/pgaudit/set_user) and\n[timescaledb](https://github.com/timescale/timescaledb)\n\nThe Postgres Operator has been developed at Zalando and is being used in\nproduction for over three years.\n\n## Using Spilo 12 images or lower\n\nIf you are already using the Postgres operator in older version with a Spilo 12 Docker image you need to be aware of the changes for the backup path.\nWe introduce the major version into the backup path to smoothen the [major version upgrade](docs/administrator.md#minor-and-major-version-upgrade) that is now supported.\n\nThe new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for wal segments in the current path but also old format paths.\nThis comes at potential performance costs and should be disabled after a few days.\n\nThe newest Spilo image is: `ghcr.io/zalando/spilo-15:2.1-p9`\n\nThe last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`\n\n\n## Getting started\n\nFor a quick first impression follow the instructions of this\n[tutorial](docs/quickstart.md).\n\n## Supported setups of Postgres and Applications\n\n![Features](docs/diagrams/neutral_operator.png)\n\n## Documentation\n\nThere is a browser-friendly version of this documentation at\n[postgres-operator.readthedocs.io](https://postgres-operator.readthedocs.io)\n\n* [How it works](docs/index.md)\n* [Installation](docs/quickstart.md#deployment-options)\n* [The Postgres experience on K8s](docs/user.md)\n* [The Postgres Operator UI](docs/operator-ui.md)\n* [DBA options - from RBAC to backup](docs/administrator.md)\n* [Build, debug and extend the operator](docs/developer.md)\n* [Configuration options](docs/reference/operator_parameters.md)\n* [Postgres manifest reference](docs/reference/cluster_manifest.md)\n* [Command-line options and environment variables](docs/reference/command_line_and_environment.md)\n\n## Community\n\nThere are two places to get in touch with the community:\n1. The [GitHub issue tracker](https://github.com/zalando/postgres-operator/issues)\n2. 
The **#postgres-operator** [slack channel](https://postgres-slack.herokuapp.com)\n", - "source_links": [], - "id": 96 - }, - { - "page_link": "backup-restore.md", - "title": "backup-restore", - "text": "## Postgres Backup and Restore\n\nZalando's postgres operator has a number of useful backup/restore features that can be useful if you want to leverage them.\nThe two main one's we've used is:\n\n* clone from object storage - backs up snapshots and WAL logs to S3 or an equivalent object store where you can then pull it back down\n* clone from another instance - nice for creating hot backups\n\n### Finding your postgres database\n\nThe process will involve usage of kubectl so it's useful to get familiar with how to manage postgres with zalando's custom resource and kubectl:\n\n```sh\nkubectl get postgresql -n $namespace # get postgres isntances in a namespace\nkubectl get postgresql $name -n $namespace -o yaml > db.yaml # dump the yaml for a postgres instance to a file\nkubectl delete postgresql $name -n $namespace # deletes an instance\n```\n\nThese are some basic commands you'll likely want to use\n\n### Clone from object storage\n\nThe procedure from this is relatively simple:\n\n* run `kubectl get postgresql -n -o yaml > db.yaml` to get the current yaml\n* copy the `uid` in the metadata section of the yam, you'll need it later. This can be found in a block like:\n\n```yaml\napiVersion: acid.zalan.do/v1\nkind: postgresql\nmetadata:\n annotations:\n meta.helm.sh/release-name: airbyte\n meta.helm.sh/release-namespace: airbyte\n creationTimestamp: \"2022-10-31T23:53:27Z\"\n generation: 6\n labels:\n app.kubernetes.io/instance: airbyte\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/name: postgres\n app.kubernetes.io/version: 1.16.0\n helm.sh/chart: postgres-0.1.16\n name: plural-airbyte\n namespace: airbyte\n resourceVersion: \"966219670\"\n uid: 40c1d314-f667-421e-a059-f4521e8eb811\nspec:\n databases:\n airbyte: airbyte\n numberOfInstances: 2\n postgresql:\n parameters:\n max_connections: \"101\"\n version: \"13\"\n resources:\n limits:\n cpu: \"2\"\n memory: 1Gi\n requests:\n cpu: 50m\n memory: 100Mi\n teamId: plural\n users:\n airbyte:\n - superuser\n - createdb\n volume:\n size: 27Gi\n```\n* delete your existing cluster (you can also create a hot standby if you want using the subsequent station just in case)\n * `kubectl delete postgresql -n ` is the command here\n* add a `clone` block within the spec field of your postgres db, that will look something like. You'll also want to strip out extraneous fields from the metadata, `creationTimestamp`, `generation`, `resourceVersion`, `uid`. 
K8s will probably understand this but still good practice.\n\n```yaml\napiVersion: acid.zalan.do/v1\nkind: postgresql\nmetadata:\n annotations:\n meta.helm.sh/release-name: airbyte\n meta.helm.sh/release-namespace: airbyte\n creationTimestamp: \"2022-10-31T23:53:27Z\"\n generation: 6\n labels:\n app.kubernetes.io/instance: airbyte\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/name: postgres\n app.kubernetes.io/version: 1.16.0\n helm.sh/chart: postgres-0.1.16\n name: plural-airbyte\n namespace: airbyte\n resourceVersion: \"966219670\"\n uid: 40c1d314-f667-421e-a059-f4521e8eb811\nspec:\n clone:\n cluster: plural-airbyte # notice this is the same as `metadata.name`\n s3_access_key_id: AWS_ACCESS_KEY_ID\n s3_secret_access_key: AWS_SECRET_ACCESS_KEY\n timestamp: \"2022-10-31T19:00:00+04:00\"\n uid: SOME-UUID\n databases:\n airbyte: airbyte\n numberOfInstances: 2\n postgresql:\n parameters:\n max_connections: \"101\"\n version: \"13\"\n resources:\n limits:\n cpu: \"2\"\n memory: 1Gi\n requests:\n cpu: 50m\n memory: 100Mi\n teamId: plural\n users:\n airbyte:\n - superuser\n - createdb\n volume:\n size: 27Gi\n```\n* `kubectl apply -f db.yaml` - reapply the database to k8s using the file you were working on\n\nPostgres will perform the backup w/in the postgres pod itself, so if you want to track its status, you can look at the pods with:\n\n```sh\nkubectl logs DB-NAME-0 -n NAMESPACE # inject whatever the name of the postgres db you created and the namespace it was applied to\n```\n\n### Clone from another cluster\n\nThis is really useful for creating hot standbys or recreating a cluster entirely if you say want to change the underlying storage class of the db. The preparation is similar to above:\n\n* dump the db to a file with `kubectl get postgresql -n -o yaml > db.yaml`\n* edit the file and add a clone block like:\n\n```yaml\napiVersion: acid.zalan.do/v1\nkind: postgresql\nmetadata:\n annotations:\n meta.helm.sh/release-name: airbyte\n meta.helm.sh/release-namespace: airbyte\n creationTimestamp: \"2022-10-31T23:53:27Z\"\n generation: 6\n labels:\n app.kubernetes.io/instance: airbyte\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/name: postgres\n app.kubernetes.io/version: 1.16.0\n helm.sh/chart: postgres-0.1.16\n name: plural-airbyte\n namespace: airbyte\n resourceVersion: \"966219670\"\n uid: 40c1d314-f667-421e-a059-f4521e8eb811\nspec:\n clone:\n cluster: plural-airbyte-old # notice no timestamp or uid triggers a pg_basebackup from a running cluster\n databases:\n airbyte: airbyte\n numberOfInstances: 2\n postgresql:\n parameters:\n max_connections: \"101\"\n version: \"13\"\n resources:\n limits:\n cpu: \"2\"\n memory: 1Gi\n requests:\n cpu: 50m\n memory: 100Mi\n teamId: plural\n users:\n airbyte:\n - superuser\n - createdb\n volume:\n size: 27Gi\n```\n* `kubectl apply -f db.yaml`\n* same as above, you can track the clone via `kubectl logs ...`\n\n", - "source_links": [], - "id": 97 - }, - { - "page_link": "https://github.com/PostHog/posthog", - "title": "posthog readme", - "text": "

\n \"posthoglogo\"\n

\n


\n\n

\n Docs - Using PostHog - Support community - Roadmap - Bug report\n

\n\n## PostHog is an open-source suite of product and data tools, built for engineers\n\n- Specify events manually, or use autocapture to get started quickly\n- Analyze your data with visualizations and session recordings\n- Improve your product with A/B testing and feature flags\n- Keep control over your data by deploying PostHog on your infrastructure\n- Use apps to connect to external services and manage data flows\n\n## Table of Contents\n\n- [Get started for free](#get-started-for-free)\n- [Features](#features)\n- [Docs and support](#docs-and-support)\n- [Contributing](#contributing)\n- [Philosophy](#philosophy)\n- [Open-source vs paid](#open-source-vs-paid)\n\n## Get started for free\n\n### PostHog Cloud\n\nThe fastest and most reliable way to get started with PostHog is signing up for free to\u00a0[PostHog Cloud](https://app.posthog.com/signup) or [PostHog Cloud EU](https://eu.posthog.com/signup)\n\n### Open-source hobby deploy\n\nDeploy a hobby instance in one line on Linux with Docker (recommended 4GB memory):\n\n\n ```bash \n /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/posthog/posthog/HEAD/bin/deploy-hobby)\" \n ``` \n\nGood for <100K events ingested monthly. See our [docs for more info and limitations](https://posthog.com/docs/self-host/open-source/deployment).\n\n### Enterprise self-hosted\n\nSee our [enterprise self-hosted docs](https://posthog.com/docs/self-host/enterprise/overview) to deploy a scalable, production-ready instance with support from our team.\n\n## Features\n![ui-demo](https://user-images.githubusercontent.com/85295485/144591577-fe97e4a5-5631-4a60-a684-45caf421507f.gif)\n\nWe bring together all the tools and data in one place to help you build better products\n\n### Product analytics and optimization\n\n- **Event-based analytics:** capture your product's usage [automatically](https://posthog.com/docs/integrate/client/js#autocapture), or [customize](https://posthog.com/docs/integrate) it to your needs\n- **User and group tracking:** understand the [people](https://posthog.com/manual/persons) and [groups](https://posthog.com/manual/group-analytics) behind the events and track properties about them\n- **Data visualizations:** create and share [graphs](https://posthog.com/docs/features/trends), [funnels](https://posthog.com/docs/features/funnels), [paths](https://posthog.com/docs/features/paths), [retention](https://posthog.com/docs/features/retention), and [dashboards](https://posthog.com/docs/features/dashboards)\n- **Session recording:** [watch videos](https://posthog.com/docs/features/session-recording) of your users' behavior, with fine-grained filters and privacy controls\n- **Heatmaps:** see where users are using your product with the [PostHog Toolbar](https://posthog.com/docs/features/toolbar)\n- **Feature flags:** test and manage the rollout of new features, target specific users and groups\n- **A/B and multi-variate testing:** run simple or complex changes as [experiments](https://posthog.com/manual/experimentation) and get automatic significance calculations\n- **Correlation analysis:** discover what events and properties [correlate](https://posthog.com/manual/correlation) with success and failure\n\n### Data and infrastructure tools\n\n- **Complete control over your data:** [host it yourself](https://posthog.com/docs/self-host/overview#deploy) on any infrastructure\n- **Import and export your data:** import from and export to the services that matter to you with [apps](https://posthog.com/apps)\n- **Ready-made libraries:** we\u2019ve 
built libraries for [JavaScript](https://posthog.com/docs/integrations/js-integration), [Python](https://posthog.com/docs/integrations/python-integration), [Ruby](https://posthog.com/docs/integrations/ruby-integration), [Node](https://posthog.com/docs/integrations/node-integration), [Go](https://posthog.com/docs/integrations/go-integration), [Android](https://posthog.com/docs/integrations/android-integration), [iOS](https://posthog.com/docs/integrations/ios-integration), [PHP](https://posthog.com/docs/integrations/php-integration), [Flutter](https://posthog.com/docs/integrations/flutter-integration), [React Native](https://posthog.com/docs/integrations/react-native-integration), [Elixir](https://posthog.com/docs/integrations/elixir-integration), [Nim](https://github.com/Yardanico/posthog-nim), and an [API](https://posthog.com/docs/integrations/api) for anything else\n- **Plays nicely with data warehouses:** import events or user data from your warehouse by writing a simple transformation plugin, and export data with pre-built apps - such as [BigQuery](https://posthog.com/apps/bigquery-export), [Redshift](https://posthog.com/apps/redshift-export), [Snowflake](https://posthog.com/apps/snowflake-export), and [S3](https://posthog.com/apps/s3-expo)\n\n[Read a full list of PostHog features](https://posthog.com/product).\n\n## Docs and support\n\nRead how to [deploy](https://posthog.com/docs/self-host), [integrate](https://posthog.com/docs/integrate), and [extend](https://posthog.com/docs/apps) PostHog in our [documentation](https://posthog.com/docs).\n\nCheck out our [tutorials](https://posthog.com/docs/apps) for step-by-step guides, how-to's, and best practices.\n\nLearn more about getting the most out of PostHog's features in [our product manual](https://posthog.com/using-posthog).\n\n[Ask a question](https://posthog.com/questions) or join our [Slack community](https://posthog.com/slack) to get support.\n\n## Contributing\n\nWe <3 contributions big and small. In priority order (although everything is appreciated) with the most helpful first:\n\n- Give us feedback in our [Slack community](https://posthog.com/slack)\n- Vote on features or get early access to beta functionality in our [roadmap](https://posthog.com/roadmap)\n- Open a PR (see our instructions on [developing PostHog locally](https://posthog.com/handbook/engineering/developing-locally))\n- Submit a [feature request](https://github.com/PostHog/posthog/issues/new?assignees=&labels=enhancement%2C+feature&template=feature_request.md) or [bug report](https://github.com/PostHog/posthog/issues/new?assignees=&labels=bug&template=bug_report.md)\n\n## Philosophy\n\nOur mission is to\u00a0increase the number of successful products\u00a0in the world. To do that, we build product and data tools that help you understand user behavior without losing control of your data.\n\nIn our view, third-party analytics tools do not work in a world of cookie deprecation, GDPR, HIPAA, CCPA, and many other four-letter acronyms. PostHog is the alternative to sending all of your customers' personal information and usage data to third-parties.\n\nPostHog gives you every tool you need to understand user behavior, develop and test improvements, and release changes to make your product more successful.\n\nPostHog operates in public as much as possible. We detail how we work and our learning on building and running a fast-growing, product-focused startup in our [handbook](https://posthog.com/handbook/getting-started/start-here).\n\n## Open-source vs. 
paid\n\nThis repo is available under the [MIT expat license](https://github.com/PostHog/posthog/blob/master/LICENSE), except for the `ee` directory (which has its [license here](https://github.com/PostHog/posthog/blob/master/ee/LICENSE)) if applicable.\n\nNeed *absolutely \ud83d\udcaf% FOSS*? Check out our [posthog-foss](https://github.com/PostHog/posthog-foss) repository, which is purged of all proprietary code and features.\n\nUsing premium features (contained in the `ee` directory) with a self-hosted instance requires a PostHog license. To learn more, [book a demo](https://posthog.com/book-a-demo) or see our [pricing page](https://posthog.com/pricing).\n\n### We\u2019re hiring!\n\nCome help us make PostHog even better. We're growing fast [and would love for you to join us](https://posthog.com/careers).\n\n## Contributors \ud83e\uddb8\n\n[//]: contributor-faces\n\n", - "source_links": [], - "id": 98 - }, - { - "page_link": "https://github.com/PrefectHQ/prefect", - "title": "prefect readme", - "text": "

\n\n


\n\n# Prefect\n\nPrefect is an orchestrator for data-intensive workflows. It's the simplest way to transform any Python function into a unit of work that can be observed and orchestrated. With Prefect, you can build resilient, dynamic workflows that react to the world around them and recover from unexpected changes. With just a few decorators, Prefect supercharges your code with features like automatic retries, distributed execution, scheduling, caching, and much more. Every activity is tracked and can be monitored with the Prefect server or Prefect Cloud dashboard.\n\n```python\nfrom prefect import flow, task\nfrom typing import List\nimport httpx\n\n\n@task(retries=3)\ndef get_stars(repo: str):\n url = f\"https://api.github.com/repos/{repo}\"\n count = httpx.get(url).json()[\"stargazers_count\"]\n print(f\"{repo} has {count} stars!\")\n\n\n@flow(name=\"GitHub Stars\")\ndef github_stars(repos: List[str]):\n for repo in repos:\n get_stars(repo)\n\n\n# run the flow!\ngithub_stars([\"PrefectHQ/Prefect\"])\n```\n\nAfter running some flows, fire up the Prefect UI to see what happened:\n\n```bash\nprefect server start\n```\n\n![](/docs/img/ui/flow-run-page.png)\n\nFrom here, you can continue to use Prefect interactively or [deploy your flows](https://docs.prefect.io/concepts/deployments) to remote environments, running on a scheduled or event-driven basis.\n\n## Getting Started\n\nPrefect requires Python 3.7 or later. To [install Prefect](https://docs.prefect.io/getting-started/installation/), run the following command in a shell or terminal session:\n\n```bash\npip install prefect\n```\n\nStart by then exploring the [core concepts of Prefect workflows](https://docs.prefect.io/concepts/), then follow one of our [friendly tutorials](https://docs.prefect.io/tutorials/first-steps) to learn by example.\n\n## Join the community\n\nPrefect is made possible by the fastest growing community of thousands of friendly data engineers. Join us in building a new kind of workflow system. The [Prefect Slack community](https://prefect.io/slack) is a fantastic place to learn more about Prefect, ask questions, or get help with workflow design. The [Prefect Discourse](https://discourse.prefect.io/) is a community-driven knowledge base to find answers to your Prefect-related questions. All community forums, including code contributions, issue discussions, and slack messages are subject to our [Code of Conduct](https://discourse.prefect.io/faq).\n\n## Contribute\n\nSee our [documentation on contributing to Prefect](https://docs.prefect.io/contributing/overview/).\n\nThanks for being part of the mission to build a new kind of workflow system and, of course, **happy engineering!**\n", - "source_links": [], - "id": 99 - }, - { - "page_link": "https://github.com/PrefectHQ/prefect", - "title": "prefect-agent readme", - "text": "

\n\n


\n\n# Prefect\n\nPrefect is an orchestrator for data-intensive workflows. It's the simplest way to transform any Python function into a unit of work that can be observed and orchestrated. With Prefect, you can build resilient, dynamic workflows that react to the world around them and recover from unexpected changes. With just a few decorators, Prefect supercharges your code with features like automatic retries, distributed execution, scheduling, caching, and much more. Every activity is tracked and can be monitored with the Prefect server or Prefect Cloud dashboard.\n\n```python\nfrom prefect import flow, task\nfrom typing import List\nimport httpx\n\n\n@task(retries=3)\ndef get_stars(repo: str):\n url = f\"https://api.github.com/repos/{repo}\"\n count = httpx.get(url).json()[\"stargazers_count\"]\n print(f\"{repo} has {count} stars!\")\n\n\n@flow(name=\"GitHub Stars\")\ndef github_stars(repos: List[str]):\n for repo in repos:\n get_stars(repo)\n\n\n# run the flow!\ngithub_stars([\"PrefectHQ/Prefect\"])\n```\n\nAfter running some flows, fire up the Prefect UI to see what happened:\n\n```bash\nprefect server start\n```\n\n![](/docs/img/ui/flow-run-page.png)\n\nFrom here, you can continue to use Prefect interactively or [deploy your flows](https://docs.prefect.io/concepts/deployments) to remote environments, running on a scheduled or event-driven basis.\n\n## Getting Started\n\nPrefect requires Python 3.7 or later. To [install Prefect](https://docs.prefect.io/getting-started/installation/), run the following command in a shell or terminal session:\n\n```bash\npip install prefect\n```\n\nStart by then exploring the [core concepts of Prefect workflows](https://docs.prefect.io/concepts/), then follow one of our [friendly tutorials](https://docs.prefect.io/tutorials/first-steps) to learn by example.\n\n## Join the community\n\nPrefect is made possible by the fastest growing community of thousands of friendly data engineers. Join us in building a new kind of workflow system. The [Prefect Slack community](https://prefect.io/slack) is a fantastic place to learn more about Prefect, ask questions, or get help with workflow design. The [Prefect Discourse](https://discourse.prefect.io/) is a community-driven knowledge base to find answers to your Prefect-related questions. All community forums, including code contributions, issue discussions, and slack messages are subject to our [Code of Conduct](https://discourse.prefect.io/faq).\n\n## Contribute\n\nSee our [documentation on contributing to Prefect](https://docs.prefect.io/contributing/overview/).\n\nThanks for being part of the mission to build a new kind of workflow system and, of course, **happy engineering!**\n", - "source_links": [], - "id": 100 - }, - { - "page_link": "https://github.com/PrefectHQ/prefect", - "title": "prefect-worker readme", - "text": "

\n\n


\n\n# Prefect\n\nPrefect is an orchestrator for data-intensive workflows. It's the simplest way to transform any Python function into a unit of work that can be observed and orchestrated. With Prefect, you can build resilient, dynamic workflows that react to the world around them and recover from unexpected changes. With just a few decorators, Prefect supercharges your code with features like automatic retries, distributed execution, scheduling, caching, and much more. Every activity is tracked and can be monitored with the Prefect server or Prefect Cloud dashboard.\n\n```python\nfrom prefect import flow, task\nfrom typing import List\nimport httpx\n\n\n@task(retries=3)\ndef get_stars(repo: str):\n url = f\"https://api.github.com/repos/{repo}\"\n count = httpx.get(url).json()[\"stargazers_count\"]\n print(f\"{repo} has {count} stars!\")\n\n\n@flow(name=\"GitHub Stars\")\ndef github_stars(repos: List[str]):\n for repo in repos:\n get_stars(repo)\n\n\n# run the flow!\ngithub_stars([\"PrefectHQ/Prefect\"])\n```\n\nAfter running some flows, fire up the Prefect UI to see what happened:\n\n```bash\nprefect server start\n```\n\n![](/docs/img/ui/flow-run-page.png)\n\nFrom here, you can continue to use Prefect interactively or [deploy your flows](https://docs.prefect.io/concepts/deployments) to remote environments, running on a scheduled or event-driven basis.\n\n## Getting Started\n\nPrefect requires Python 3.7 or later. To [install Prefect](https://docs.prefect.io/getting-started/installation/), run the following command in a shell or terminal session:\n\n```bash\npip install prefect\n```\n\nStart by then exploring the [core concepts of Prefect workflows](https://docs.prefect.io/concepts/), then follow one of our [friendly tutorials](https://docs.prefect.io/tutorials/first-steps) to learn by example.\n\n## Join the community\n\nPrefect is made possible by the fastest growing community of thousands of friendly data engineers. Join us in building a new kind of workflow system. The [Prefect Slack community](https://prefect.io/slack) is a fantastic place to learn more about Prefect, ask questions, or get help with workflow design. The [Prefect Discourse](https://discourse.prefect.io/) is a community-driven knowledge base to find answers to your Prefect-related questions. All community forums, including code contributions, issue discussions, and slack messages are subject to our [Code of Conduct](https://discourse.prefect.io/faq).\n\n## Contribute\n\nSee our [documentation on contributing to Prefect](https://docs.prefect.io/contributing/overview/).\n\nThanks for being part of the mission to build a new kind of workflow system and, of course, **happy engineering!**\n", - "source_links": [], - "id": 101 - }, - { - "page_link": "https://github.com/rabbitmq/rabbitmq-server", - "title": "rabbitmq readme", - "text": "[![Test](https://github.com/rabbitmq/rabbitmq-server/actions/workflows/test.yaml/badge.svg)](https://github.com/rabbitmq/rabbitmq-server/actions/workflows/test.yaml)\n\n# RabbitMQ Server\n\n[RabbitMQ](https://rabbitmq.com) is a [feature rich](https://rabbitmq.com/documentation.html),\nmulti-protocol messaging and streaming broker. 
It supports:\n\n * AMQP 0-9-1\n * AMQP 1.0\n * [RabbitMQ Stream Protocol](https://rabbitmq.com/streams.html)\n * MQTT 3.1.1\n * STOMP 1.0 through 1.2\n\n\n## Installation\n\n * [Installation guides](https://rabbitmq.com/download.html) for various platforms\n * [Kubernetes Cluster Operator](https://rabbitmq.com/kubernetes/operator/operator-overview.html)\n * [Changelog](https://www.rabbitmq.com/changelog.html)\n * [Releases](https://github.com/rabbitmq/rabbitmq-server/releases) on GitHub\n * [Currently supported released series](https://www.rabbitmq.com/versions.html)\n * [Supported Erlang versions](https://www.rabbitmq.com/which-erlang.html)\n\n\n## Tutorials and Documentation\n\n * [RabbitMQ tutorials](https://rabbitmq.com/getstarted.html)\n * [All documentation guides](https://rabbitmq.com/documentation.html)\n * [RabbitMQ blog](https://blog.rabbitmq.com/)\n\nSome key doc guides include\n\n * [CLI tools guide](https://rabbitmq.com/cli.html) \n * [Clustering](https://www.rabbitmq.com/clustering.html) and [Cluster Formation](https://www.rabbitmq.com/cluster-formation.html) guides\n * [Configuration guide](https://rabbitmq.com/configure.html) \n * [Client libraries and tools](https://rabbitmq.com/devtools.html)\n * [Monitoring](https://rabbitmq.com/monitoring.html) and [Prometheus/Grafana](https://www.rabbitmq.com/prometheus.html) guides\n * [Kubernetes Cluster Operator](https://rabbitmq.com/kubernetes/operator/operator-overview.html)\n * [Production checklist](https://rabbitmq.com/production-checklist.html)\n * [Quorum queues](https://rabbitmq.com/quorum-queues.html): a replicated, data safety- and consistency-oriented queue type\n * [Streams](https://rabbitmq.com/streams.html): a persistent and replicated append-only log with non-destructive consumer semantics\n * [Runnable tutorials](https://github.com/rabbitmq/rabbitmq-tutorials/)\n\nRabbitMQ documentation is also [developed on GitHub](https://github.com/rabbitmq/rabbitmq-website/).\n\n## Commercial Features and Support\n\n * [Commercial edition of RabbitMQ](https://www.vmware.com/products/rabbitmq.html)\n * [Commercial edition for Kubernetes](https://rabbitmq.com/kubernetes/tanzu/installation.html)\n * [Commercial support](https://rabbitmq.com/services.html) from [VMware](https://vmware.com) for open source RabbitMQ\n\n## Getting Help from the Community\n\n * [Community Discord server](https://rabbitmq.com/discord/)\n * [Community Slack](https://rabbitmq.com/slack/)\n * [GitHub Discussions](https://github.com/rabbitmq/rabbitmq-server/discussions/)\n * [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users)\n * `#rabbitmq` on [Libera Chat](https://libera.chat/)\n\n\n## Contributing\n\nSee [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://rabbitmq.com/github.html).\n\nQuestions about contributing, internals and so on are very welcome on the [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).\n\n\n## Licensing\n\nRabbitMQ server is [licensed under the MPL 2.0](LICENSE-MPL-RabbitMQ).\n\n\n## Building From Source and Packaging\n\n * [Contributor resources](https://github.com/rabbitmq/contribute)\n * [Building RabbitMQ from Source](https://rabbitmq.com/build-server.html)\n * [Building RabbitMQ Distribution Packages](https://rabbitmq.com/build-server.html)\n\n\n## Copyright\n\n(c) 2007-2023 VMware, Inc. 
or its affiliates.\n", - "source_links": [], - "id": 102 - }, - { - "page_link": "https://github.com/ray-project/ray", - "title": "ray readme", - "text": null, - "source_links": [], - "id": 103 - }, - { - "page_link": null, - "title": "redash readme", - "text": null, - "source_links": [], - "id": 104 - }, - { - "page_link": "https://github.com/redis/redis", - "title": "redis readme", - "text": null, - "source_links": [], - "id": 105 - }, - { - "page_link": "external-hostname.md", - "title": "external-hostname", - "text": "# Expose Redis Outside Cluster\n\nWe ship redis with only internal cluster networking by default for security reasons, but you can still expose either the redis master or replicas externally from the cluster. The process simply involves editing the `context.yaml` file at the root of your repo with:\n\n```\nconfiguration:\n ...\n redis:\n masterHostname: redis-master.CLUSTER-SUBDOMAIN\n replicaHostname: redis-replica.CLUSTER-SUBDOMAIN # if you want to expose the replica as well\n```\n\nyou can then simply run: `plural build --only redis && plural deploy --commit \"expose redis\"`", - "source_links": [], - "id": 106 - }, - { - "page_link": "https://github.com/redpanda-data/redpanda", - "title": "redpanda readme", - "text": null, - "source_links": [], - "id": 107 - }, - { - "page_link": "https://github.com/stakater/Reloader", - "title": "reloader readme", - "text": "# ![](assets/web/reloader-round-100px.png) RELOADER\n\n[![Go Report Card](https://goreportcard.com/badge/github.com/stakater/reloader?style=flat-square)](https://goreportcard.com/report/github.com/stakater/reloader)\n[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](http://godoc.org/github.com/stakater/reloader)\n[![Release](https://img.shields.io/github/release/stakater/reloader.svg?style=flat-square)](https://github.com/stakater/reloader/releases/latest)\n[![GitHub tag](https://img.shields.io/github/tag/stakater/reloader.svg?style=flat-square)](https://github.com/stakater/reloader/releases/latest)\n[![Docker Pulls](https://img.shields.io/docker/pulls/stakater/reloader.svg?style=flat-square)](https://hub.docker.com/r/stakater/reloader/)\n[![Docker Stars](https://img.shields.io/docker/stars/stakater/reloader.svg?style=flat-square)](https://hub.docker.com/r/stakater/reloader/)\n[![license](https://img.shields.io/github/license/stakater/reloader.svg?style=flat-square)](LICENSE)\n[![Get started with Stakater](https://stakater.github.io/README/stakater-github-banner.png)](http://stakater.com/?utm_source=Reloader&utm_medium=github)\n\n## Problem\n\nWe would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset`, `Statefulset` and `Rollout`\n\n## Solution\n\nReloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts`.\n\n## Compatibility\n\nReloader is compatible with kubernetes >= 1.9\n\n## How to use Reloader\n\nFor a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap` or `Secret` called `foo-secret` or both. 
Then add your annotation (by default `reloader.stakater.com/auto`) to main metadata of your `Deployment`\n\n```yaml\nkind: Deployment\nmetadata:\n annotations:\n reloader.stakater.com/auto: \"true\"\nspec:\n template:\n metadata:\n```\n\nThis will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.\n\nYou can restrict this discovery to only `ConfigMap` or `Secret` objects that\nare tagged with a special annotation. To take advantage of that, annotate\nyour deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:\n\n```yaml\nkind: Deployment\nmetadata:\n annotations:\n reloader.stakater.com/search: \"true\"\nspec:\n template:\n```\n\nand Reloader will trigger the rolling upgrade upon modification of any\n`ConfigMap` or `Secret` annotated like this:\n\n```yaml\nkind: ConfigMap\nmetadata:\n annotations:\n reloader.stakater.com/match: \"true\"\ndata:\n key: value\n```\n\nprovided the secret/configmap is being used in an environment variable, or a\nvolume mount.\n\nPlease note that `reloader.stakater.com/search` and\n`reloader.stakater.com/auto` do not work together. If you have the\n`reloader.stakater.com/auto: \"true\"` annotation on your deployment, then it\nwill always restart upon a change in configmaps or secrets it uses, regardless\nof whether they have the `reloader.stakater.com/match: \"true\"` annotation or\nnot.\n\nWe can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.\nTo do this either set the auto annotation to `\"false\"` (`reloader.stakater.com/auto: \"false\"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)\n\n### Configmap\n\nTo perform rolling upgrade when change happens only on specific configmaps use below annotation.\n\nFor a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`\n\n```yaml\nkind: Deployment\nmetadata:\n annotations:\n configmap.reloader.stakater.com/reload: \"foo-configmap\"\nspec:\n template:\n metadata:\n```\n\nUse comma separated list to define multiple configmaps.\n\n```yaml\nkind: Deployment\nmetadata:\n annotations:\n configmap.reloader.stakater.com/reload: \"foo-configmap,bar-configmap,baz-configmap\"\nspec:\n template: \n metadata:\n```\n\n### Secret\n\nTo perform rolling upgrade when change happens only on specific secrets use below annotation.\n\nFor a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`\n\n```yaml\nkind: Deployment\nmetadata:\n annotations:\n secret.reloader.stakater.com/reload: \"foo-secret\"\nspec:\n template: \n metadata:\n```\n\nUse comma separated list to define multiple secrets.\n\n```yaml\nkind: Deployment\nmetadata:\n annotations:\n secret.reloader.stakater.com/reload: \"foo-secret,bar-secret,baz-secret\"\nspec:\n template: \n metadata:\n```\n\n### NOTES\n\n- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). 
[Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.\n- For [rollouts](https://github.com/argoproj/argo-rollouts/) reloader simply triggers a change is up to you how you configure the rollout strategy.\n- `reloader.stakater.com/auto: \"true\"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`\n- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.\n- you may override the auto annotation with the `--auto-annotation` flag\n- you may override the search annotation with the `--auto-search-annotation` flag\n and the match annotation with the `--search-match-annotation` flag\n- you may override the configmap annotation with the `--configmap-annotation` flag\n- you may override the secret annotation with the `--secret-annotation` flag\n- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag\n- you may want to prevent watching certain resources with the `--resources-to-ignore` flag\n- you can configure logging in JSON format with the `--log-format=json` option\n- you can configure the \"reload strategy\" with the `--reload-strategy=` option (details below)\n\n## Reload Strategies\nReloader supports multiple \"reload\" strategies for performing rolling upgrades to resources. The following list describes them:\n- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers \n referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.).\n This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.\n- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation\n on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools\n to detect configuration drift after a resource is reloaded. 
Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its \n `configMap` or `secret` be deleted and recreated.\n This strategy can be specified with the `--reload-strategy=annotations` argument.\n \n\n## Deploying to Kubernetes\n\nYou can deploy Reloader by following methods:\n\n### Vanilla Manifests\n\nYou can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided in manifest with a proper value and apply it by running the command given below:\n\n```bash\nkubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml\n```\n\nBy default, Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.\n\nReloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container :\n\n| Args | Description |\n| -------------------------------- | -------------------- |\n| --resources-to-ignore=configMaps | To ignore configMaps |\n| --resources-to-ignore=secrets | To ignore secrets |\n\n`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.\n\n### Vanilla kustomize\n\nYou can also apply the vanilla manifests by running the following command\n\n```bash\nkubectl apply -k https://github.com/stakater/Reloader/deployments/kubernetes\n```\n\nSimilarly to vanilla manifests get deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.\n\n### Kustomize\n\nYou can write your own `kustomization.yaml` using ours as a 'base' and write patches to tweak the configuration.\n\n```yaml\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nbases:\n - https://github.com/stakater/Reloader/deployments/kubernetes\n\nnamespace: reloader\n```\n\n### Helm Charts\n\nAlternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating reloader from Helm2 to Helm3\n\n```bash\nhelm repo add stakater https://stakater.github.io/stakater-charts\n\nhelm repo update\n\nhelm install stakater/reloader # For helm3 add --generate-name flag or set the release name\n```\n\n**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.\n\n```bash\nhelm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name\n```\n\nReloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:\n\n| Parameter | Description | Type |\n| ---------------- | -------------------------------------------------------------- | ------- |\n| ignoreSecrets | To ignore secrets. Valid value are either `true` or `false` | boolean |\n| ignoreConfigMaps | To ignore configMaps. 
\nYou can also set the log format of Reloader to JSON by setting `logFormat` to `json` in `values.yaml` and applying the chart.\n\nYou can enable scraping of Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in the `values.yaml` file. The service monitor will be removed in future releases of Reloader in favour of the pod monitor.\n\n**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not always be possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:\n\n| Parameter | Description | Type |\n| ---------------- |------------------------------------------------------------------------------| ------- |\n| isOpenshift | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean |\n| isArgoRollouts | Enable Argo Rollouts. Valid values are either `true` or `false` | boolean |\n| reloadOnCreate | Enable reload on create events. Valid values are either `true` or `false` | boolean |\n\n## Help\n\n### Documentation\n\nYou can find more documentation [here](docs)\n\n### Have a question?\n\nFile a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us an [email](mailto:stakater@gmail.com).\n\n### Talk to us on Slack\n\nJoin and talk to us on Slack for discussing Reloader\n\n[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://slack.stakater.com/)\n[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater-community.slack.com/messages/CC5S05S12)\n\n## Contributing\n\n### Bug Reports & Feature Requests\n\nPlease use the [issue tracker](https://github.com/stakater/Reloader/issues) to report any bugs or file feature requests.\n\n### Developing\n\n1. Deploy Reloader.\n2. Run `okteto up` to activate your development container.\n3. Run `make build`.\n4. Run `./Reloader`.\n\nPRs are welcome. In general, we follow the \"fork-and-pull\" Git workflow.\n\n1. **Fork** the repo on GitHub\n2. **Clone** the project to your own machine\n3. **Commit** changes to your own branch\n4. **Push** your work back up to your fork\n5. Submit a **Pull request** so that we can review your changes\n\nNOTE: Be sure to merge the latest from \"upstream\" before making a pull request!\n\n## Changelog\n\nView our closed [Pull Requests](https://github.com/stakater/Reloader/pulls?q=is%3Apr+is%3Aclosed).\n\n## License\n\nApache2 \u00a9 [Stakater](http://stakater.com)\n\n## About\n\n`Reloader` is maintained by [Stakater][website]. Like it? 
Please let us know at \n\nSee [our other projects][community]\nor contact us in case of professional services and queries on \n\n[website]: http://stakater.com/\n[community]: https://github.com/stakater/\n\n## Acknowledgements\n\n- [ConfigmapController](https://github.com/fabric8io/configmapcontroller); We documented here why we re-created [Reloader](docs/Reloader-vs-ConfigmapController.md)\n", - "source_links": [], - "id": 108 - }, - { - "page_link": null, - "title": "renovate readme", - "text": null, - "source_links": [], - "id": 109 - }, - { - "page_link": "https://github.com/mend/renovate-on-prem", - "title": "renovate-on-prem readme", - "text": "![WhiteSource Renovate banner](https://app.renovatebot.com/images/whitesource_renovate_660_220.jpg)\n\n# WhiteSource Renovate On-Premises\n\nThis repository contains Documentation, Release Notes and an Issue Tracker for WhiteSource Renovate On-Premises, which was originally known as \"Renovate Pro\".\n\n## Documentation\n\nPlease view [the markdown docs in this repository](https://github.com/whitesource/renovate-on-prem/tree/main/docs).\n\n## Download\n\nWhiteSource Renovate is distributed via Docker Hub using the namespace [whitesource/renovate](https://hub.docker.com/r/whitesource/renovate).\n\n## License\n\nUse of WhiteSource Renovate On-Premises is bound by [WhiteSource's Terms of Service](https://renovate.whitesourcesoftware.com/terms-of-service/). You can request a license key by submitting the form at [https://renovate.whitesourcesoftware.com/on-premises/](https://renovate.whitesourcesoftware.com/on-premises/). License requests are processed semi-manually so please allow up to 3 working days to receive your license key by email.\n\nThe documentation and examples in this repository are MIT-licensed.\n\n## Usage\n\nPlease see the `docs/` and `examples/` directories within this repository.\n", - "source_links": [], - "id": 110 - }, - { - "page_link": "https://github.com/tryretool/retool-onpremise", - "title": "retool readme", - "text": "

\n Retool logo\n\n Build internal tools, remarkably fast.\n
\n\n# Deploying Retool on-premise\n\n[Deploying Retool on-premise](https://docs.retool.com/docs/self-hosted) ensures that all access to internal data is managed within your own cloud environment. It also provides the flexibility to control how Retool is setup within your infrastructure, the ability to configure logging, and access to enable custom SAML SSO using providers like Okta and Active Directory.\n\n## Table of contents\n\n- [Select a Retool version number](#select-a-retool-version-number)\n- [One-Click Deploy](#one-click-deploy)\n - [AWS](#one-click-deployment-to-aws)\n - [Render](#one-click-deployment-to-render)\n- [Single deployments](#single-deployments)\n - [General Machine Specifications](#general-machine-specifications)\n - [AWS w/ EC2](#aws-deploy-with-ec2)\n - [GCP w/ Compute Engine VM](#gcp-deploy-with-compute-engine-virtual-machine)\n - [Azure w/ Azure VM](#azure-deploy-with-azure-virtual-machine)\n - [Heroku](#deploying-retool-on-heroku)\n - [Aptible](#deploying-retool-using-aptible)\n- [Managed deployments](#managed-deployments)\n - [General](#general-managed-deployments)\n - [Kubernetes](#deploying-on-kubernetes)\n - [Kubernetes + Helm](#deploying-on-kubernetes-with-helm)\n - [AWS](#amazon-web-services---managed-deployments)\n - [ECS](#deploying-on-ecs)\n - [ECS + Fargate](#deploying-on-ecs-with-fargate)\n\n- [Additional Resources](#additional-resources)\n - [Health check endpoint](#health-check-endpoint)\n - [Troubleshooting](#troubleshooting)\n - [Updating Retool](#updating-retool)\n - [Environment variables](#environment-variables)\n - [Deployment Health Checklist](#deployment-health-checklist)\n - [Docker cheatsheet](#docker-cheatsheet)\n\n## Select a Retool version number\n\nWe recommend you set your Retool deployment to a specific version of Retool (that is, a specific semver version number in the format `X.Y.Z`, instead of a tag name). This will help prevent unexpected behavior in your Retool instances. When you are ready to upgrade Retool, you can bump the version number to the specific new version you want.\n\nTo help you select a version, see our guide on [Retool Release Versions](https://docs.retool.com/docs/self-hosted-release-notes).\n\n## One-Click Deploy\n\n### One-click Deployment to AWS\n\nRegion name | Region code | Launch\n--- | --- | ---\nUS East (N. Virginia) | us-east-1 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://us-east-1.console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool) \nUS West (N. 
California) |\tus-west-1 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://us-west-1.console.aws.amazon.com/cloudformation/home?region=us-west-1#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool) \nEU (Ireland) |\teu-west-1 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://eu-west-1.console.aws.amazon.com/cloudformation/home?region=eu-west-1#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool) \nAsia Pacific (Mumbai) |\tap-south-1 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://ap-south-1.console.aws.amazon.com/cloudformation/home?region=ap-south-1#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool)\nUS East (Ohio) | us-east-2 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://us-east-2.console.aws.amazon.com/cloudformation/home?region=us-east-2#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool) \nUS West (Oregon) |\tus-west-2 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://us-west-2.console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool) \nEU (Frankfurt) |\teu-central-1 | [![Launch Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://eu-central-1.console.aws.amazon.com/cloudformation/home?region=eu-central-1#/stacks/quickcreate?templateURL=https://s3-external-1.amazonaws.com/cf-templates-x1ljyg3aygh-us-east-1/2021157Dqr-SSOPLaunchJuneo3g1bsca3hh&stackName=retool) \n\n### One-click Deployment to Render\n\nJust use the Deploy to Render button below! Here are [some docs](https://render.com/docs/deploy-retool) on deploying Retool with Render.\n\n[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/render-examples/retool)\n\n## Single Deployments\n\n### General Machine Specifications\n- Linux Virtual Machine\n - Ubuntu `16.04` or higher\n- `2` vCPUs\n- `8` GiB + of Memory\n- `60` GiB + of Storage \n- Networking Requirements for Initial Setup:\n - `80` (http): for connecting to the server from the browser\n - `443` (https): for connecting to the server from the browser\n - `22` (SSH): To allow you to SSH into your instance and configure it\n - `3000` (Retool): This is the default port Retool runs on\n\n#### With Workflows\nIf your deployment contains [Retool Workflows](http://retool.com/products/workflows), you may need additional resourcing.\n\nWe recommend bumping up the cluster memory to at least `12` GiB of memory and `4` vCPUs.\n\n### AWS Deploy With EC2\n\nSpin up a new EC2 instance. If using AWS, use the following steps:\n\n1. Click **Launch Instance** from the EC2 dashboard.\n1. 
Click **Select** for an instance of Ubuntu `16.04` or higher.\n1. Select an instance type of at least `t3.medium` and click **Next**.\n1. Ensure you select the VPC that also includes the databases / API\u2019s you will want to connect to and click **Next**.\n1. Increase the storage size to `60` GB or higher and click **Next**.\n1. Optionally add some Tags (e.g. `app = retool`) and click **Next**. This makes it easier to find if you have a lot of instances.\n1. Set the network security groups for ports `80`, `443`, `22` and `3000`, with sources set to `0.0.0.0/0` and `::/0`, and click **Review and Launch**. We need to open ports `80` (http) and `443` (https) so you can connect to the server from a browser, as well as port `22` (ssh) so that you can ssh into the instance to configure it and run Retool. By default on a vanilla EC2, Retool will run on port `3000`.\n1. On the **Review Instance Launch** screen, click **Launch** to start your instance.\n1. If you're connecting to internal databases, whitelist the VPS's IP address in your database.\n1. From your command line tool, SSH into your EC2 instance.\n1. Run the command `git clone https://github.com/tryretool/retool-onpremise.git`.\n1. Run the command `cd retool-onpremise` to enter the cloned repository's directory.\n1. Edit the `Dockerfile` to set the version of Retool you want to install. To do this, replace `X.Y.Z` in `FROM tryretool/backend:X.Y.Z` with your desired version. See [Select a Retool version number](#select-a-retool-version-number) to help you choose a version.\n1. Run `./install.sh` to install Docker and Docker Compose.\n1. In your `docker.env` (this file is only created after running `./install.sh`) add the following:\n\n ```docker\n # License key granted to you by Retool\n LICENSE_KEY=YOUR_LICENSE_KEY\n\n # This is necessary if you plan on logging in before setting up https\n COOKIE_INSECURE=true\n ```\n\n1. Run `sudo docker-compose up -d` to start the Retool server.\n1. Run `sudo docker-compose ps` to make sure all the containers are up and running.\n1. Navigate to your server's IP address in a web browser. Retool should now be running on port `3000`.\n1. Click Sign Up, since we're starting from a clean slate. The first user to create an account on an instance becomes the administrator.\n\n### GCP Deploy With Compute Engine Virtual Machine\n\n1. Click the Compute Engine Resource from the GCP Dashboard and select VM Instances\n1. In the top menu, select \u2018Create Instance\u2019\n1. Create a new VM to these Specs\n - Ubuntu Operating System Version 16.04 LTS or higher\n - Storage Size 60 GB or higher\n - Ram 4 GB or Higher (e2-medium)\n - Optionally add Labels (eg app = retool)\n1. Create Instance\n1. Navigate via search to the VPC Network Firewall settings and be sure to add the following ports set to`0.0.0.0/0` and `::/0`\n - `80` (HTTP)\n - `443` (HTTPS)\n - `22` (SSH)\n - `3000` (Retool access in browser)\n1. If you're connecting to an internal database, be sure to whitelist the VPC\u2019s ip address in your DB\n1. SSH into your instance, or use the Google SSH Button to open a VM Terminal in a browser window.\n1. Run Command `git clone https://github.com/tryretool/retool-onpremise.git`\n1. Run Command `cd retool-onpremise`\n1. Edit the Dockerfile using VIM (or other text editor) to specify your desired version number of Retool. To do this, replace `X.Y.Z` in `FROM tryretool/backend:X.Y.Z` with your desired version. 
See\u00a0[Select a Retool version number](#select-a-retool-version-number)\u00a0to help you choose a version.\n1. Run Command `./install.sh` to install docker containers, docker, and docker-compose\n1. In your docker.env file (this file will only exist after step 11)\n - Add the license key from `my.retool.com` to replace `YOUR_LICENSE_KEY`\n - If you will need to access your instance before configuring HTTPS, you will need to uncomment the line `COOKIE_INSECURE=true`\n1. Run `sudo docker-compose up -d` to start the Retool docker containers\n1. Run `sudo docker-compose ps` to see container status and ensure all are running\n1. Navigate to your servers IP address or domain in a web browser. Retool will be running on `port 3000`\n1. Click Sign Up, since this is a brand new instance. The first user created will become the administrator\n\n### Azure Deploy with Azure Virtual Machine\n\n1. In the main Azure Portal select Virtual Machine under Azure Services\n1. Click the Create button and select Virtual Machine \n1. Select an image of Ubuntu 16.04 or higher\n1. For instance size, select `Standard_D2s_v3 - 2 vcpus, 8 GiB memory`\n1. Under the Networking tab, Ensure you select the same Virtual Network that also includes the databases / API\u2019s you will want to connect to and click\u00a0**Next**.\n1. Under the Networking tab, configure your network security group to contain the following ports. You may need to create a new Security group that contains these 4 ports (`80`,\u00a0`443`,\u00a0`22`\u00a0and\u00a0`3000`): \n - `80` (http) and `443` (https) for connecting to the server from a browser \n - `22` (ssh) to allow you to ssh into the instance and configure it\n - `3000` is the port that Retool runs on by default\n1. From your command line tool, SSH into your Azure instance.\n1. Run the command\u00a0`git clone https://github.com/tryretool/retool-onpremise.git`.\n1. Run the command\u00a0`cd retool-onpremise`\u00a0to enter the cloned repository's directory.\n1. Edit the\u00a0`Dockerfile`\u00a0to set the version of Retool you want to install. To do this, replace\u00a0`X.Y.Z`\u00a0in\u00a0`FROM tryretool/backend:X.Y.Z`\u00a0with your desired version. See\u00a0[Select a Retool version number](https://github.com/tryretool/retool-onpremise#select-a-retool-version-number)\u00a0to help you choose a version.\n1. Run\u00a0`./install.sh`\u00a0to install Docker and Docker Compose.\n1. In your\u00a0`docker.env`\u00a0(this file is only created after running\u00a0`./install.sh`) add the following:\n \n `# License key granted to you by Retool\n LICENSE_KEY=YOUR_LICENSE_KEY`\n \n `# This is necessary if you plan on logging in before setting up https\n COOKIE_INSECURE=true`\n \n1. Run\u00a0`sudo docker-compose up -d`\u00a0to start the Retool server.\n1. Run\u00a0`sudo docker-compose ps`\u00a0to make sure all the containers are up and running.\n1. Navigate to your server's IP address in a web browser. Retool should now be running on port\u00a0`3000`.\n1. Click Sign Up, since we're starting from a clean slate. The first user to create an account on an instance becomes the administrator.\n\n\n### General Single-Instance Deploy\n\n### Deploying Retool on Heroku\n\nYou can manually deploy to Heroku using the following steps:\n\n1. Install the Heroku CLI, and login. Documentation for this can be found here: \n1. Clone this repo `git clone https://github.com/tryretool/retool-onpremise`\n1. Change the working directory to the newly cloned repository `cd ./retool-onpremise`\n1. 
Create a new Heroku app with the stack set to `container` with `heroku create your-app-name --stack=container`\n1. Add a free database: `heroku addons:create heroku-postgresql:hobby-dev`\n1. In the `Settings` page of your Heroku app, add the following environment variables:\n 1. `NODE_ENV` - set to `production`\n 1. `HEROKU_HOSTED` set to `true`\n 1. `JWT_SECRET` - set to a long secure random string used to sign JSON Web Tokens\n 1. `ENCRYPTION_KEY` - a long secure random string used to encrypt database credentials\n 1. `USE_GCM_ENCRYPTION` set to `true` for authenticated encryption of secrets; if true, `ENCRYPTION_KEY` must be 24 bytes\n 1. `LICENSE_KEY` - your Retool license key\n 1. `PGSSLMODE` - set to `require`\n1. Push the code: `git push heroku master`\n\nTo lockdown the version of Retool used, just edit the first line under `./heroku/Dockerfile` to:\n\n```docker\nFROM tryretool/backend:X.Y.Z\n```\n\n### Deploying Retool using Aptible\n\n1. Add your public SSH key to your Aptible account through the Aptible dashboard\n1. Install the Aptible CLI, and login. Documentation for this can be found here: \n1. Clone this repo `git clone https://github.com/tryretool/retool-onpremise`\n1. Change the working directory to the newly cloned repository `cd ./retool-onpremise`\n1. Edit the `Dockerfile` to set the version of Retool you want to install. To do this, replace `X.Y.Z` in `FROM tryretool/backend:X.Y.Z` with your desired version. See [Select a Retool version number](#select-a-retool-version-number) to help you choose a version.\n1. Create a new Aptible app with `aptible apps:create your-app-name`\n1. Add a database: `aptible db:create your-database-name --type postgresql`\n1. Set your config variables (your database connection string will be in your Aptible Dashboard and you can parse out the individual values by following [these instructions](https://www.aptible.com/documentation/deploy/reference/databases/credentials.html#using-database-credentials)). Be sure to rename `EXPIRED-LICENSE-KEY-TRIAL` to the license key provided to you.\n1. If secrets need an authenticated encryption method, add `USE_GCM_ENCRYTPION=true` to the command below and change `ENCRYPTION_KEY=$(cat /dev/urandom | base64 | head -c 24)`\n\n ```yml\n aptible config:set --app your-app-name \\\n POSTGRES_DB=your-db \\\n POSTGRES_HOST=your-db-host \\\n POSTGRES_USER=your-user \\\n POSTGRES_PASSWORD=your-db-password \\\n POSTGRES_PORT=your-db-port \\\n POSTGRES_SSL_ENABLED=true \\\n FORCE_SSL=true \\\n NODE_ENV=production \\\n JWT_SECRET=$(cat /dev/urandom | base64 | head -c 256) \\\n ENCRYPTION_KEY=$(cat /dev/urandom | base64 | head -c 64) \\\n LICENSE_KEY=EXPIRED-LICENSE-KEY-TRIAL\n ```\n\n1. Set your git remote which you can find in the Aptible dashboard: `git remote add aptible your-git-url`\n1. Push the code: `git push aptible master`\n1. Create a default Aptible endpoint\n1. Navigate to your endpoint and sign up as a new user in your Retool instance\n\n## Managed deployments\n\nDeploy Retool on a managed service. We've provided some starter template files for Cloudformation setups (ECS + Fargate), Kubernetes, and Helm.\n\n### General Managed Deployments\n\n### Deploying on Kubernetes\n\n1. Navigate into the `kubernetes` directory\n1. Edit the `retool-container.yaml` and `retool-jobs-runner.yaml` files to set the version of Retool you want to install. To do this, replace `X.Y.Z` in `image: tryretool/backend:X.Y.Z` with your desired version. 
See [Select a Retool version number](#select-a-retool-version-number) to help you choose a version.\n1. Copy the `retool-secrets.template.yaml` file to `retool-secrets.yaml` and inside the `{{ ... }}` sections, replace with a suitable base64 encoded string.\n 1. To base64 encode your license key, run `echo -n | base64` in the command line. Be sure to add the `-n` character, as it removes the trailing newline character from the encoding.\n 1. If you do not wish to add google authentication, replace the templates with an empty string.\n 1. You will need a license key in order to proceed.\n1. Run `kubectl apply -f ./retool-secrets.yaml`\n1. Run `kubectl apply -f ./retool-postgres.yaml`\n1. Run `kubectl apply -f ./retool-container.yaml`\n1. Run `kubectl apply -f ./retool-jobs-runner.yaml`\n\nFor ease of use, this will create a postgres container with a persistent volume for the storage of Retool data. We recommend that you use a managed database service like RDS as a long-term solution. The application will be exposed on a public ip address on port 3000 - we leave it to the user to handle DNS and SSL.\n\nPlease note that by default Retool is configured to use Secure Cookies - that means that you will be unable to login unless https has been correctly setup.\n\nTo force Retool to send the auth cookies over HTTP, please set the `COOKIE_INSECURE` environment variable to `'true'` in `./retool-container.yaml`. Do this by adding the following two lines to the `env` section.\n\n```yaml\n - name: COOKIE_INSECURE\n value: 'true'\n```\n\nThen, to update the running deployment, run `$ kubectl apply -f ./retool-container.yaml`\n\n### Deploying on Kubernetes with Helm\n\nSee for full Helm chart documentation\nand instructions.\n\n### Amazon Web Services - Managed Deployments\n\n### Deploying on ECS\n\nWe provide a [template file](/cloudformation/retool.yaml) for you to get started deploying on ECS.\n\n1. In the ECS Dashboard, click **Create Cluster**\n1. Select `EC2 Linux + Networking` as the cluster template.\n1. In your instance configuration, enter the following:\n - Select **On-demand instance**\n - Select **t2.medium** as the instance type (or your desired instance size)\n - Choose how many instances you want to spin up\n - (Optional) Add key pair\n - Choose your existing VPC (or create a new one)\n - (Optional) Add tags\n - Enable CloudWatch container insights\n1. Select the VPC in which you\u2019d like to launch the ECS cluster; make sure that you select a [public subnet](https://stackoverflow.com/questions/48830793/aws-vpc-identify-private-and-public-subnet).\n1. Download the [retool.yaml](/cloudformation/retool.yaml) file, and add your license key and other relevant variables.\n1. Go to the AWS Cloudformation dashboard, and click **Create Stack with new resources \u2192 Upload a template file**. Upload your edited `retool.yaml` file.\n1. Then, enter the following parameters:\n - Cluster: the name of the ECS cluster you created earlier\n - DesiredCount: 2\n - Environment: staging\n - Force: false\n - Image: `tryretool/backend:X.Y.Z` (But replace `X.Y.Z` with your desired version. See [Select a Retool version number](#select-a-retool-version-number) to help you choose a version.)\n - MaximumPercent: 250\n - MinimumPercent: 50\n - SubnetId: Select 2 subnets in your VPC - make sure these subnets are public (have an internet gateway in their route table)\n - VPC ID: select the VPC you want to use\n1. 
Click through to create the stack; this could take up to 15 minutes; you can monitor the progress of the stack being created in the `Events` tab in Cloudformation\n1. After everything is complete, you should see all the resources with a `CREATE_COMPLETE` status.\n1. In the **Outputs** section within the CloudFormation dashboard, you should be able to find the ALB DNS URL. This is where Retool should be running.\n1. The backend tries to guess your domain to create invite links, but with a load balancer in front of Retool you'll need to set the `BASE_DOMAIN` environment variable to your fully qualified domain (i.e. `https://retool.company.com`). Docs [here](https://docs.retool.com/docs/environment-variables).\n\n#### OOM issues\n\nIf running into OOM issues (especially on larger instance sizes with >4 vCPUs)\n\n- Verify the issue by going into the ECS console and checking the Service Metrics. Ideally\n - Memory utilization should fall around 40% (20% - 60%)\n - CPU utilization should be close to zero (0% - 5%)\n- If the values fall outside these ranges, increase the CPU and memory allocation in `retool.yml`\n\n### Deploying on ECS with Fargate\n\nWe provide Fargate template files supporting [public](/cloudformation/fargate.yaml) and [private](/cloudformation/fargate.private.yaml) subnets.\n\n1. In the ECS Dashboard, click **Create Cluster**\n1. In **Step 1: Select a cluster template**, select `Networking Only (Powered by AWS Fargate)` as the cluster template.\n1. In **Step 2: Configure cluster**, be sure to enable CloudWatch Container Insights. This will help us monitor logs and the health of our deployment through CloudWatch.\n1. Download the [public](/cloudformation/fargate.yaml) or [private](/cloudformation/fargate.private.yaml) template file.\n1. Edit the template file to provide your license key and any required [environment variables](https://docs.retool.com/docs/environment-variables) (under the Environment key within the retool ContainerDefinitions). Do not modify the Parameters object on line 2 of the template file. CloudFormation will prompt for these values after you upload the template file.\n1. Go to the AWS CloudFormation dashboard, and click **Create Stack with new resources \u2192 Upload a template file**. Upload your edited `.yaml` file.\n1. Enter the following parameters:\n - Cluster: the name of the ECS cluster you created earlier\n - DesiredCount: 2\n - Environment: staging\n - Force: false\n - Image: `tryretool/backend:X.Y.Z` (But replace `X.Y.Z` with your desired version. See [Select a Retool version number](#select-a-retool-version-number) to help you choose a version.)\n - MaximumPercent: 250\n - MinimumPercent: 50\n - SubnetId: Select 2 subnets in your VPC - make sure these subnets are public (have an internet gateway in their route table)\n - VPC ID: select the VPC you want to use\n1. Click through to create the stack; this could take up to 15 minutes; you can monitor the progress of the stack being created in the `Events` tab in Cloudformation\n1. In the **Outputs** section, you should be able to find the ALB DNS URL.\n1. Currently the load balancer is listening on port 3000; to make it available on port 80 we have to go to the **EC2 dashboard \u2192 Load Balancers \u2192 Listeners** and click Edit to to change the port to 80.\n - If you get an error that your security group does not allow traffic on this listener port, you must add an inbound rule allowing HTTP on port 80.\n1. 
In the **Outputs** section within the CloudFormation dashboard, you should be able to find the ALB DNS URL. This is where Retool should be running.\n1. The backend tries to guess your domain to create invite links, but with a load balancer in front of Retool you'll need to set the `BASE_DOMAIN` environment variable to your fully qualified domain (i.e. `https://retool.company.com`). Docs [here](https://docs.retool.com/docs/environment-variables).\n\n### Google Cloud Platform - Managed Deployments\n\n\n## Additional Resources\n\n**For details on additional features like SAML SSO, gRPC, custom certs, and more, visit our [docs](https://docs.retool.com/docs).**\n\n### Environment Variables\n\nYou can set environment variables to enable custom functionality like [managing secrets](https://docs.retool.com/docs/secret-management-using-environment-variables), customizing logs, and much more. For a list of all environment variables visit our [docs](https://docs.retool.com/docs/environment-variables).\n\n### Health check endpoint\n\nRetool also has a health check endpoint that you can set up to monitor liveliness of Retool. You can configure your probe to make a `GET` request to `/api/checkHealth`.\n\n### Troubleshooting\n\n- On Kubernetes, I get the error `SequelizeConnectionError: password authentication failed for user \"...\"`\n - Make sure that the secrets that you encoded in base64 don't have trailing whitespace! You can use `kubectl exec printenv` to help debug this issue.\n - Run `echo -n | base64` in the command line. The `-n` character removes the trailing newline character from the encoding.\n- I can't seem to login? I keep getting redirected to the login page after signing in.\n - If you have not enabled SSL yet, you will need to add the line `COOKIE_INSECURE=true` to your `docker.env` file / environment configuration so that the authentication cookies can be sent over http. Make sure to run `sudo docker-compose up -d` after modifying the `docker.env` file.\n- `TypeError: Cannot read property 'licenseVerification' of null` or `TypeError: Cannot read property 'name' of null`\n - There is an issue with your license key. Double check that the license key is correct and that it has no trailing whitespaces.\n- I want to use a private IP of the machine, not the default public one\n - When you run\u00a0`./install.sh`, instead of just clicking enter, type in your private IP. If you want to change this after it has already been set, modify the\u00a0DOMAINS\u00a0variable in the\u00a0docker.env\u00a0file.\n\n\n### Updating Retool\n\nThe latest Retool releases can be pulled from Docker Hub. 
When you run an on-premise instance of Retool, you\u2019ll need to pull an updated image in order to get new features and fixes.\n\nSee more information on our different release channels and recommended update strategies in [our documentation](https://docs.retool.com/docs/updating-retool-on-premise#retool-release-versions).\n\n### Docker Compose deployments\n\nUpdate the version number in the first line of your `Dockerfile`.\n\n```docker\nFROM tryretool/backend:X.Y.Z\n```\n\nThen run the included update script `./update_retool.sh` from this directory.\n\n### Kubernetes deployments\n\nTo update Retool on Kubernetes, you can use the following command, replacing `X.Y.Z` with the version number or named tag that you\u2019d like to update to.\n\n```zsh\nkubectl set image deploy/api api=tryretool/backend:X.Y.Z\n```\n\n### Heroku deployments\n\nTo update a Heroku deployment that was created with the button above, you may first set up a `git` repo to push to Heroku\n\n```zsh\nheroku login\ngit clone https://github.com/tryretool/retool-onpremise\ncd retool-onpremise\nheroku git:remote -a YOUR_HEROKU_APP_NAME\n```\n\nTo update Retool (this will automatically fetch the latest version of Retool)\n\n```zsh\ngit commit --allow-empty -m 'Redeploying'\ngit push heroku master\n```\n\n### Deployment Health Checklist\n\n###### Overview\nWe recommend completing our Deployment Health Checklist to help you improve the stability and reliability of your Retool deployment.\n\nPlease fill out the checklist and share it with our team. This information will help us better understand your infrastructure so that we can support you through product changes, proactive outreach, and more informed support.\n\n###### Instructions\nMake a copy of the [Deployment Health Checklist](https://docs.google.com/spreadsheets/d/19XYpWTnYrvsllTuM2VQGFWLiXzMHmTNAzZKyWY-sfSU) for your Retool deployment. Add your company name to the document title for reference.\nFill out the requested information on the first and second tabs.\nShare your filled out with your Retool contact or support@retool.com. We will reference this in the event of any support conversations.\n\n\n\n### Docker cheatsheet\n\nBelow is a cheatsheet for useful Docker commands. Note that you may need to prefix them with `sudo`.\n\n| Command | Description |\n| ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |\n| `docker-compose up -d` | Builds, (re)creates, starts, and attaches to containers for a service. `-d`allows containers to run in background (detached). |\n| `docker-compose down` | Stops and remove containers and networks |\n| `docker-compose stop` | Stops containers, but does not remove them and their networks |\n| `docker ps -a` | Display all Docker containers |\n| `docker-compose ps -a` | Display all containers related to images declared in the `docker-compose` file. 
|\n| `docker logs -f ` | Stream container logs to stdout |\n| `docker exec -it psql -U -W ` | Runs `psql` inside a container |\n| `docker kill $(docker ps -q)` | Kills all running containers |\n| `docker rm $(docker ps -a -q)` | Removes all containers and networks |\n| `docker rmi -f $(docker images -q)` | Removes (and un-tags) all images from the host |\n| `docker volume rm $(docker volume ls -q)` | Removes all volumes and completely wipes any persisted data |\n", - "source_links": [], - "id": 111 - }, - { - "page_link": "https://github.com/rook/rook", - "title": "rook readme", - "text": "\"Rook\"\n\n[![CNCF Status](https://img.shields.io/badge/cncf%20status-graduated-blue.svg)](https://www.cncf.io/projects)\n[![GitHub release](https://img.shields.io/github/release/rook/rook/all.svg)](https://github.com/rook/rook/releases)\n[![Docker Pulls](https://img.shields.io/docker/pulls/rook/ceph)](https://hub.docker.com/u/rook)\n[![Go Report Card](https://goreportcard.com/badge/github.com/rook/rook)](https://goreportcard.com/report/github.com/rook/rook)\n[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1599/badge)](https://bestpractices.coreinfrastructure.org/projects/1599)\n[![Security scanning](https://github.com/rook/rook/actions/workflows/synk.yaml/badge.svg)](https://github.com/rook/rook/actions/workflows/synk.yaml)\n[![Slack](https://slack.rook.io/badge.svg)](https://slack.rook.io)\n[![Twitter Follow](https://img.shields.io/twitter/follow/rook_io.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=rook_io&user_id=788180534543339520)\n\n# What is Rook?\n\nRook is an open source **cloud-native storage orchestrator** for Kubernetes, providing the platform, framework, and support for a diverse set of storage solutions to natively integrate with cloud-native environments.\n\nRook turns storage software into self-managing, self-scaling, and self-healing storage services. It does this by automating deployment, bootstrapping, configuration, provisioning, scaling, upgrading, migration, disaster recovery, monitoring, and resource management. Rook uses the facilities provided by the underlying cloud-native container management, scheduling and orchestration platform to perform its duties.\n\nRook integrates deeply into cloud native environments leveraging extension points and providing a seamless experience for scheduling, lifecycle management, resource management, security, monitoring, and user experience.\n\nFor more details about the storage solutions currently supported by Rook, please refer to the [project status section](#project-status) below.\nWe plan to continue adding support for other storage systems and environments based on community demand and engagement in future releases. See our [roadmap](ROADMAP.md) for more details.\n\nRook is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF) as a [graduated](https://www.cncf.io/announcements/2020/10/07/cloud-native-computing-foundation-announces-rook-graduation/) level project. If you are a company that wants to help shape the evolution of technologies that are container-packaged, dynamically-scheduled and microservices-oriented, consider joining the CNCF. 
For details about who's involved and how Rook plays a role, read the CNCF [announcement](https://www.cncf.io/blog/2018/01/29/cncf-host-rook-project-cloud-native-storage-capabilities).\n\n## Getting Started and Documentation\n\nFor installation, deployment, and administration, see our [Documentation](https://rook.github.io/docs/rook/latest).\n\n## Contributing\n\nWe welcome contributions. See [Contributing](CONTRIBUTING.md) to get started.\n\n## Report a Bug\n\nFor filing bugs, suggesting improvements, or requesting new features, please open an [issue](https://github.com/rook/rook/issues).\n\n### Reporting Security Vulnerabilities\n\nIf you find a vulnerability or a potential vulnerability in Rook please let us know immediately at\n[cncf-rook-security@lists.cncf.io](mailto:cncf-rook-security@lists.cncf.io). We'll send a confirmation email to acknowledge your\nreport, and we'll send an additional email when we've identified the issues positively or\nnegatively.\n\nFor further details, please see the complete [security release process](SECURITY.md).\n\n## Contact\n\nPlease use the following to reach members of the community:\n\n- Slack: Join our [slack channel](https://slack.rook.io)\n- GitHub: Start a [discussion](https://github.com/rook/rook/discussions) or open an [issue](https://github.com/rook/rook/issues)\n- Twitter: [@rook_io](https://twitter.com/rook_io)\n- Security topics: [cncf-rook-security@lists.cncf.io](#reporting-security-vulnerabilities)\n\n### Community Meeting\n\nA regular community meeting takes place every other [Tuesday at 9:00 AM PT (Pacific Time)](https://zoom.us/j/392602367?pwd=NU1laFZhTWF4MFd6cnRoYzVwbUlSUT09).\nConvert to your [local timezone](http://www.thetimezoneconverter.com/?t=9:00&tz=PT%20%28Pacific%20Time%29).\n\nAny changes to the meeting schedule will be added to the [agenda doc](https://docs.google.com/document/d/1exd8_IG6DkdvyA0eiTtL2z5K2Ra-y68VByUUgwP7I9A/edit?usp=sharing) and posted to [Slack #announcements](https://rook-io.slack.com/messages/C76LLCEE7/).\n\nAnyone who wants to discuss the direction of the project, design and implementation reviews, or general questions with the broader community is welcome and encouraged to join.\n\n- Meeting link: \n- [Current agenda and past meeting notes](https://docs.google.com/document/d/1exd8_IG6DkdvyA0eiTtL2z5K2Ra-y68VByUUgwP7I9A/edit?usp=sharing)\n- [Past meeting recordings](https://www.youtube.com/playlist?list=PLP0uDo-ZFnQP6NAgJWAtR9jaRcgqyQKVy)\n\n## Project Status\n\nThe status of each storage provider supported by Rook can be found in the table below.\nEach API group is assigned its own individual status to reflect their varying maturity and stability.\nMore details about API versioning and status in Kubernetes can be found on the Kubernetes [API versioning page](https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-versioning), but the key difference between the statuses are summarized below:\n\n- **Alpha:** The API may change in incompatible ways in a later software release without notice, recommended for use only in short-lived testing clusters, due to increased risk of bugs and lack of long-term support.\n- **Beta:** Support for the overall features will not be dropped, though details may change. 
Support for upgrading or migrating between versions will be provided, either through automation or manual steps.\n- **Stable:** Features will appear in released software for many subsequent versions and support for upgrading between versions will be provided with software automation in the vast majority of scenarios.\n\n| Name | Details | API Group | Status |\n| ---- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ------ |\n| Ceph | [Ceph](https://ceph.com/) is a distributed storage system that provides file, block and object storage and is deployed in large scale production clusters. | ceph.rook.io/v1 | Stable |\n\nThis repo is for the Ceph storage provider. The [Cassandra](https://github.com/rook/cassandra) and [NFS](https://github.com/rook/nfs) storage providers moved to a separate repo to allow for each [storage provider](https://rook.github.io/docs/rook/latest/storage-providers.html) to have an independent development and release schedule.\n\n### Official Releases\n\nOfficial releases of Rook can be found on the [releases page](https://github.com/rook/rook/releases).\nPlease note that it is **strongly recommended** that you use [official releases](https://github.com/rook/rook/releases) of Rook, as unreleased versions from the master branch are subject to changes and incompatibilities that will not be supported in the official releases.\nBuilds from the master branch can have functionality changed and even removed at any time without compatibility support and without prior notice.\n\n## Licensing\n\nRook is under the Apache 2.0 license.\n\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Frook%2Frook.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Frook%2Frook?ref=badge_large)\n", - "source_links": [], - "id": 112 - }, - { - "page_link": "https://github.com/getsentry/sentry", - "title": "sentry readme", - "text": "

\n Sentry logo\n\n Users and logs provide clues. Sentry provides answers.\n
\n\n# What's Sentry?\n\nSentry is a developer-first error tracking and performance monitoring platform that helps developers see what actually matters, solve quicker, and learn continuously about their applications.\n\n\n\n


\n\n## Official Sentry SDKs\n\n - [JavaScript](https://github.com/getsentry/sentry-javascript)\n - [Electron](https://github.com/getsentry/sentry-electron/)\n - [React-Native](https://github.com/getsentry/sentry-react-native)\n - [Python](https://github.com/getsentry/sentry-python)\n - [Ruby](https://github.com/getsentry/sentry-ruby)\n - [PHP](https://github.com/getsentry/sentry-php)\n - [Laravel](https://github.com/getsentry/sentry-laravel)\n - [Go](https://github.com/getsentry/sentry-go)\n - [Rust](https://github.com/getsentry/sentry-rust)\n - [Java/Kotlin](https://github.com/getsentry/sentry-java)\n - [Objective-C/Swift](https://github.com/getsentry/sentry-cocoa)\n - [C\\#/F\\#](https://github.com/getsentry/sentry-dotnet)\n - [C/C++](https://github.com/getsentry/sentry-native)\n - [Dart](https://github.com/getsentry/sentry-dart)\n - [Perl](https://github.com/getsentry/perl-raven)\n - [Clojure](https://github.com/getsentry/sentry-clj/)\n - [Elixir](https://github.com/getsentry/sentry-elixir)\n - [Unity](https://github.com/getsentry/sentry-unity)\n - [Unreal Engine](https://github.com/getsentry/sentry-unreal)\n\n# Resources\n\n - [Documentation](https://docs.sentry.io/)\n - [Community](https://forum.sentry.io/) (Bugs, feature requests,\n general questions)\n - [Discord](https://discord.gg/PXa5Apfe7K)\n - [Contributing](https://docs.sentry.io/internal/contributing/)\n - [Bug Tracker](https://github.com/getsentry/sentry/issues)\n - [Code](https://github.com/getsentry/sentry)\n - [Transifex](https://www.transifex.com/getsentry/sentry/) (Translate\n Sentry\\!)\n", - "source_links": [], - "id": 113 - }, - { - "page_link": "https://github.com/drakkan/sftpgo/", - "title": "sftpgo readme", - "text": "# SFTPGo\n\n[![CI Status](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)\n[![Code Coverage](https://codecov.io/gh/drakkan/sftpgo/branch/main/graph/badge.svg)](https://codecov.io/gh/drakkan/sftpgo/branch/main)\n[![License: AGPL-3.0-only](https://img.shields.io/badge/License-AGPLv3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)\n[![Docker Pulls](https://img.shields.io/docker/pulls/drakkan/sftpgo)](https://hub.docker.com/r/drakkan/sftpgo)\n[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)\n\n[English](./README.md) | [\u7b80\u4f53\u4e2d\u6587](./README.zh_CN.md)\n\nFully featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support.\nSeveral storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, SFTP.\n\n## Sponsors\n\nIf you find SFTPGo useful please consider supporting this Open Source project.\n\nMaintaining and evolving SFTPGo is a lot of work - easily the equivalent of a full time job - for me.\n\nI'd like to make SFTPGo into a sustainable long term project and would not like to introduce a dual licensing option and limit some features to the proprietary version only.\n\nIf you use SFTPGo, it is in your best interest to ensure that the project you rely on stays healthy and well maintained.\nThis can only happen with your donations and [sponsorships](https://github.com/sponsors/drakkan) :heart:\n\nWith sponsorships/donations we establish a channel for reciprocal access, ensuring better outcomes for both you and the project.\n\nIf you just take and don't return anything back, the project will die in the 
long run and you will be forced to pay for a similar proprietary solution.\n\nMore [info](https://github.com/drakkan/sftpgo/issues/452).\n\n### Thank you to our sponsors\n\n#### Platinum sponsors\n\n[\"Aledade](https://www.aledade.com/)\n\n#### Silver sponsors\n\n[\"Dendi](https://dendisoftware.com/)\n\n#### Bronze sponsors\n\n[\"7digital](https://www.7digital.com/)\n\n## Support policy\n\nSFTPGo is an Open Source project and you can of course use it for free but please don't ask for free support as well.\n\nWe will check the reported issues to see if you are experiencing a bug and if so we'll will fix it, but will only provide support to project [sponsors/donors](#sponsors).\n\nIf you report an invalid issue or ask for step-by-step support, your issue will remain open with no answer or will be closed as invalid without further explanation. Thanks for understanding.\n\n## Features\n\n- Support for serving local filesystem, encrypted local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or other SFTP accounts over SFTP/SCP/FTP/WebDAV.\n- Virtual folders are supported: a virtual folder can use any of the supported storage backends. So you can have, for example, a user with the S3 backend mapping a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.\n- Configurable [custom commands and/or HTTP hooks](./docs/custom-actions.md) on upload, pre-upload, download, pre-download, delete, pre-delete, rename, mkdir, rmdir on SSH commands and on user add, update and delete.\n- Virtual accounts stored within a \"data provider\".\n- SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (key/value store in pure Go) and in-memory data providers are supported.\n- Chroot isolation for local accounts. Cloud-based accounts can be restricted to a certain base path.\n- Per-user and per-directory virtual permissions, for each path you can allow or deny: directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group/file mode and modification time.\n- [REST API](./docs/rest-api.md) for users and folders management, data retention, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.\n- The [Event Manager](./docs/eventmanager.md) allows to define custom workflows based on server events or schedules.\n- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.\n- [Web client interface](./docs/web-client.md) so that end users can change their credentials, manage and share their files in the browser.\n- Public key and password authentication. Multiple public keys per-user are supported.\n- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).\n- Keyboard interactive authentication. You can easily setup a customizable multi-factor authentication.\n- Partial authentication. 
You can configure multi-step authentication requiring, for example, the user password after successful public key authentication.\n- Per-user authentication methods.\n- [Two-factor authentication](./docs/howto/two-factor-authentication.md) based on time-based one time passwords (RFC 6238) which works with Authy, Google Authenticator and other compatible apps.\n- Simplified user administrations using [groups](./docs/groups.md).\n- [Roles](./docs/roles.md) allow you to create limited administrators who can only create and manage users with their role.\n- Custom authentication via [external programs/HTTP API](./docs/external-auth.md).\n- Web Client and Web Admin user interfaces support [OpenID Connect](https://openid.net/connect/) authentication and so they can be integrated with identity providers such as [Keycloak](https://www.keycloak.org/). You can find more details [here](./docs/oidc.md).\n- [Data At Rest Encryption](./docs/dare.md).\n- Dynamic user modification before login via [external programs/HTTP API](./docs/dynamic-user-mod.md).\n- Quota support: accounts can have individual disk quota expressed as max total size and/or max number of files.\n- Bandwidth throttling, with separate settings for upload and download and overrides based on the client's IP address.\n- Data transfer bandwidth limits, with total limit or separate settings for uploads and downloads and overrides based on the client's IP address. Limits can be reset using the REST API.\n- Per-protocol [rate limiting](./docs/rate-limiting.md) is supported and can be optionally connected to the built-in defender to automatically block hosts that repeatedly exceed the configured limit.\n- Per-user maximum concurrent sessions.\n- Per-user and global IP filters: login can be restricted to specific ranges of IP addresses or to a specific IP address.\n- Per-user and per-directory shell like patterns filters: files can be allowed, denied and optionally hidden based on shell like patterns.\n- Automatically terminating idle connections.\n- Automatic blocklist management using the built-in [defender](./docs/defender.md).\n- Geo-IP filtering using a [plugin](https://github.com/sftpgo/sftpgo-plugin-geoipfilter).\n- Atomic uploads are configurable.\n- Per-user files/folders ownership mapping: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (\\*NIX only).\n- Support for Git repositories over SSH.\n- SCP and rsync are supported.\n- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.\n- [WebDAV](./docs/webdav.md) is supported.\n- ACME protocol is supported. SFTPGo can obtain and automatically renew TLS certificates for HTTPS, WebDAV and FTPS from `Let's Encrypt` or other ACME compliant certificate authorities, using the the `HTTP-01` or `TLS-ALPN-01` [challenge types](https://letsencrypt.org/docs/challenge-types/).\n- Two-Way TLS authentication, aka TLS with client certificate authentication, is supported for REST API/Web Admin, FTPS and WebDAV over HTTPS.\n- Per-user protocols restrictions. 
You can configure the allowed protocols (SSH/HTTP/FTP/WebDAV) for each user.\n- [Prometheus metrics](./docs/metrics.md) are supported.\n- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP service without losing the information about the client's address.\n- Easy [migration](./examples/convertusers) from Linux system user accounts.\n- [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.\n- [SFTP subsystem mode](./docs/sftp-subsystem.md): you can use SFTPGo as OpenSSH's SFTP subsystem.\n- Performance analysis using built-in [profiler](./docs/profiling.md).\n- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.\n- Log files are accurate and they are saved in the easily parsable JSON format ([more information](./docs/logs.md)).\n- SFTPGo supports a [plugin system](./docs/plugins.md) and therefore can be extended using external plugins.\n- Infrastructure as Code (IaC) support using the [Terraform provider](https://registry.terraform.io/providers/drakkan/sftpgo/latest).\n\n## Platforms\n\nSFTPGo is developed and tested on Linux. After each commit, the code is automatically built and tested on Linux, macOS, Windows and FreeBSD. Other *BSD variants should work too.\n\n## Requirements\n\n- Go as build only dependency. We support the Go version(s) used in [continuous integration workflows](./.github/workflows).\n- A suitable SQL server to use as data provider:\n - upstream supported versions of PostgreSQL, MySQL and MariaDB.\n - CockroachDB stable.\n- The SQL server is optional: you can choose to use an embedded SQLite, bolt or in memory data provider.\n\n## Installation\n\nBinary releases for Linux, macOS, and Windows are available. Please visit the [releases](https://github.com/drakkan/sftpgo/releases \"releases\") page.\n\nAn official Docker image is available. Documentation is [here](./docker/README.md).\n\n
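\nFor a quick local test of that image, something along these lines should work. The image name comes from the Docker Hub badge above; the published ports (`2022` for SFTP, `8080` for the WebAdmin/WebClient) and the lack of persistent volumes are assumptions to verify against the Docker documentation linked above:\n\n```bash\n# Throwaway SFTPGo instance: data is lost when the container is removed\ndocker run -d --name sftpgo \\\n  -p 2022:2022 \\\n  -p 8080:8080 \\\n  drakkan/sftpgo\n```\n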
\n\nSome Linux distro packages are available\n\n- For Arch Linux via AUR:\n - [sftpgo](https://aur.archlinux.org/packages/sftpgo/). This package follows stable releases. It requires `git`, `gcc` and `go` to build.\n - [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/). This package follows stable releases downloading the prebuilt linux binary from GitHub. It does not require `git`, `gcc` and `go` to build.\n - [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/). This package builds and installs the latest git `main` branch. It requires `git`, `gcc` and `go` to build.\n- Deb and RPM packages are built after each commit and for each release.\n- For Ubuntu a PPA is available [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).\n- Void Linux provides an [official package](https://github.com/void-linux/void-packages/tree/master/srcpkgs/sftpgo).\n\n
\n\nAPT and YUM repositories are [available](./docs/repo.md).\n\nSFTPGo is also available on some marketplaces:\n\n- [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=6e849ab8-70a6-47de-9a43-13c3fa849335)\n- [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/eliamarzia1667381463185.sftpgo_linux)\n- [Elest.io](https://elest.io/open-source/sftpgo)\n\nPurchasing from there will help keep SFTPGo a long-term sustainable project.\n\n
Windows packages\n\n- The Windows installer to install and run SFTPGo as a Windows service.\n- The portable package to start SFTPGo on demand.\n- The [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/install) package to install and run SFTPGo as a Windows service: `winget install SFTPGo`.\n- The [Chocolatey package](https://community.chocolatey.org/packages/sftpgo) to install and run SFTPGo as a Windows service.\n\n
\n\nOn macOS you can install from the Homebrew [Formula](https://formulae.brew.sh/formula/sftpgo).\nOn FreeBSD you can install from the [SFTPGo port](https://www.freshports.org/ftp/sftpgo).\nOn DragonFlyBSD you can install SFTPGo from [DPorts](https://github.com/DragonFlyBSD/DPorts/tree/master/ftp/sftpgo).\n\nYou can easily test new features selecting a commit from the [Actions](https://github.com/drakkan/sftpgo/actions) page and downloading the matching build artifacts for Linux, macOS or Windows. GitHub stores artifacts for 90 days.\n\nAlternately, you can [build from source](./docs/build-from-source.md).\n\n[Getting Started Guide for the Impatient](./docs/howto/getting-started.md).\n\n## Configuration\n\nA full explanation of all configuration methods can be found [here](./docs/full-configuration.md).\n\nPlease make sure to [initialize the data provider](#data-provider-initialization-and-management) before running the daemon.\n\nTo start SFTPGo with the default settings, simply run:\n\n```bash\nsftpgo serve\n```\n\nCheck out [this documentation](./docs/service.md) if you want to run SFTPGo as a service.\n\n### Data provider initialization and management\n\nBefore starting the SFTPGo server please ensure that the configured data provider is properly initialized/updated.\n\nFor PostgreSQL, MySQL and CockroachDB providers, you need to create the configured database. For SQLite, the configured database will be automatically created at startup. Memory and bolt data providers do not require an initialization but they could require an update to the existing data after upgrading SFTPGo.\n\nSFTPGo will attempt to automatically detect if the data provider is initialized/updated and if not, will attempt to initialize/ update it on startup as needed.\n\nAlternately, you can create/update the required data provider structures yourself using the `initprovider` command.\n\nFor example, you can simply execute the following command from the configuration directory:\n\n```bash\nsftpgo initprovider\n```\n\nTake a look at the CLI usage to learn how to specify a different configuration file:\n\n```bash\nsftpgo initprovider --help\n```\n\nYou can disable automatic data provider checks/updates at startup by setting the `update_mode` configuration key to `1`.\n\nYou can also reset your provider by using the `resetprovider` sub-command. Take a look at the CLI usage for more details:\n\n```bash\nsftpgo resetprovider --help\n```\n\n:warning: Please note that some data providers (e.g. MySQL and CockroachDB) do not support schema changes within a transaction, this means that you may end up with an inconsistent schema if migrations are forcibly aborted. CockroachDB doesn't support database-level locks, so make sure you don't execute migrations concurrently.\n\n## Create the first admin\n\nTo start using SFTPGo you need to create an admin user, you can do it in several ways:\n\n- by using the web admin interface. 
The default URL is [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)\n- by loading initial data\n- by enabling `create_default_admin` in your configuration file and setting the environment variables `SFTPGO_DEFAULT_ADMIN_USERNAME` and `SFTPGO_DEFAULT_ADMIN_PASSWORD`\n\n## Upgrading\n\nSFTPGo supports upgrading from the previous release branch to the current one.\nSome examples for supported upgrade paths are:\n\n- from 2.1.x to 2.2.x\n- from 2.2.x to 2.3.x and so on.\n\nFor supported upgrade paths, the data and schema are migrated automatically when SFTPGo starts, alternatively you can use the `initprovider` command before starting SFTPGo.\n\nSo if, for example, you want to upgrade from 2.0.x to 2.2.x, you must first install version 2.1.x, update the data provider (automatically, by starting SFTPGo or manually using the `initprovider` command) and finally install the version 2.2.x. It is recommended to always install the latest available minor version, ie do not install 2.1.0 if 2.1.2 is available.\n\nLoading data from a provider independent JSON dump is supported from the previous release branch to the current one too. After upgrading SFTPGo it is advisable to regenerate the JSON dump from the new version.\n\n## Downgrading\n\nIf for some reason you want to downgrade SFTPGo, you may need to downgrade your data provider schema and data as well. You can use the `revertprovider` command for this task.\n\nAs for upgrading, SFTPGo supports downgrading from the previous release branch to the current one.\n\nSo, if you plan to downgrade from 2.3.x to 2.2.x, before uninstalling 2.3.x version, you can prepare your data provider executing the following command from the configuration directory:\n\n```shell\nsftpgo revertprovider\n```\n\nTake a look at the CLI usage to learn how to specify a configuration file:\n\n```shell\nsftpgo revertprovider --help\n```\n\nThe `revertprovider` command is not supported for the memory provider.\n\nPlease note that we only support the current release branch and the current main branch, if you find a bug it is better to report it rather than downgrading to an older unsupported version.\n\n## Users, groups, folders and other resource management\n\nAfter starting SFTPGo you can manage users, groups, folders and other resources using:\n\n- the [WebAdmin UI](./docs/web-admin.md)\n- the [REST API](./docs/rest-api.md)\n\nTo support embedded data providers like `bolt` and `SQLite`, which do not support concurrent connections, we can't have a CLI that directly write users and other resources to the data provider, we always have to use the REST API.\n\nFull details for users, groups, folders, admins and other resources are documented in the [OpenAPI](./openapi/openapi.yaml) schema. If you want to render the schema without importing it manually, you can explore it on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).\n\n:warning: SFTPGo users, groups and folders are virtual and therefore unrelated to the system ones. There is no need to create system-wide users and groups.\n\n## Tutorials\n\nSome step-to-step tutorials can be found inside the source tree [howto](./docs/howto \"How-to\") directory.\n\n## Authentication options\n\n
### External Authentication\n\nCustom authentication methods can easily be added. SFTPGo supports external authentication modules, and writing a new backend can be as simple as a few lines of shell script. More information can be found [here](./docs/external-auth.md).\n\n
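As a minimal sketch of the idea (shown in Python rather than shell), the hook below accepts a single hard-coded account and denies everything else. The environment variable names and the "empty username means failure" convention follow what docs/external-auth.md describes, but treat them as assumptions to verify against your SFTPGo version.

```python
#!/usr/bin/env python3
# Illustrative external-auth hook: accept one hard-coded account, deny the rest.
# Variable names and the "empty username means failure" convention are taken
# from docs/external-auth.md; verify them against your SFTPGo version.
import json
import os

username = os.environ.get("SFTPGO_AUTHD_USERNAME", "")
password = os.environ.get("SFTPGO_AUTHD_PASSWORD", "")

if username == "alice" and password == "change-me":
    user = {
        "status": 1,
        "username": username,
        "home_dir": f"/srv/sftpgo/data/{username}",
        "permissions": {"/": ["*"]},
    }
else:
    user = {"username": ""}  # authentication failed

print(json.dumps(user))
```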
\n\n
### Keyboard Interactive Authentication\n\nKeyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.\nThis authentication method is typically used for multi-factor authentication.\n\nMore information can be found [here](./docs/keyboard-interactive.md).\n\n
\n\n## Dynamic user creation or modification\n\nA user can be created or modified by an external program just before the login. More information about this can be found [here](./docs/dynamic-user-mod.md).\n\n## Custom Actions\n\nSFTPGo allows you to configure custom commands and/or HTTP hooks to receive notifications about file uploads, deletions and several other events.\n\nMore information about custom actions can be found [here](./docs/custom-actions.md).\n\n## Virtual folders\n\nDirectories outside the user home directory or based on a different storage provider can be mapped as virtual folders, more information [here](./docs/virtual-folders.md).\n\n## Other hooks\n\nYou can get notified as soon as a new connection is established using the [Post-connect hook](./docs/post-connect-hook.md) and after each login using the [Post-login hook](./docs/post-login-hook.md).\nYou can use your own hook to [check passwords](./docs/check-password-hook.md).\n\n## Storage backends\n\n### S3/GCP/Azure\n\nEach user can be mapped with a [S3 Compatible Object Storage](./docs/s3.md) /[Google Cloud Storage](./docs/google-cloud-storage.md)/[Azure Blob Storage](./docs/azure-blob-storage.md) bucket or a bucket virtual folder.\n\n### SFTP backend\n\nEach user can be mapped to another SFTP server account or a subfolder of it. More information can be found [here](./docs/sftpfs.md).\n\n### Encrypted backend\n\nData at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).\n\n### HTTP/S backend\n\nHTTP/S backend allows you to write your own custom storage backend by implementing a REST API. More information can be found [here](./docs/httpfs.md).\n\n### Other Storage backends\n\nAdding new storage backends is quite easy:\n\n- implement the [Fs interface](./internal/vfs/vfs.go#L86 \"interface for filesystem backends\").\n- update the user method `GetFilesystem` to return the new backend\n- update the web interface and the REST API CLI\n- add the flags for the new storage backed to the `portable` mode\n\nAnyway, some backends require a pay per-use account (or they offer free account for a limited time period only). To be able to add support for such backends or to review pull requests, please provide a test account. The test account must be available for enough time to be able to maintain the backend and do basic tests before each new release.\n\n## Brute force protection\n\nSFTPGo supports a built-in [defender](./docs/defender.md).\n\nAlternately you can use the [connection failed logs](./docs/logs.md) for integration in tools such as [Fail2ban](http://www.fail2ban.org/). Example of [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in fail2ban directory.\n\n## Account's configuration properties\n\nDetails information about account configuration properties can be found [here](./docs/account.md).\n\n## Performance\n\nSFTPGo can easily saturate a Gigabit connection on low end hardware with no special configuration, this is generally enough for most use cases.\n\nMore in-depth analysis of performance can be found [here](./docs/performance.md).\n\n## Release Cadence\n\nSFTPGo releases are feature-driven, we don't have a fixed time based schedule. 
As a rough estimate, you can expect 1 or 2 new releases per year.\n\n## Acknowledgements\n\nSFTPGo makes use of the third party libraries listed inside [go.mod](./go.mod).\n\nWe are very grateful to all the people who contributed with ideas and/or pull requests.\n\nThank you [ysura](https://www.ysura.com/) for granting me stable access to a test AWS S3 account.\n\n## License\n\nGNU AGPL-3.0-only\n", - "source_links": [], - "id": 114 - }, - { - "page_link": "https://github.com/SonarSource/sonarqube", - "title": "sonarqube readme", - "text": "# SonarQube [![Build Status](https://app.travis-ci.com/SonarSource/sonarqube.svg?branch=master)](https://app.travis-ci.com/SonarSource/sonarqube) [![Quality Gate Status](https://next.sonarqube.com/sonarqube/api/project_badges/measure?project=sonarqube&metric=alert_status&token=d95182127dd5583f57578d769b511660601a8547)](https://next.sonarqube.com/sonarqube/dashboard?id=sonarqube)\n\n## Continuous Inspection\n\nSonarQube provides the capability to not only show health of an application but also to highlight issues newly introduced. With a Quality Gate in place, you can [Clean as You Code](https://www.sonarsource.com/blog/clean-as-you-code/) and therefore improve code quality systematically.\n\n## Links\n\n- [Website](https://www.sonarqube.org)\n- [Download](https://www.sonarqube.org/downloads/)\n- [Documentation](https://docs.sonarqube.org)\n- [Twitter](https://twitter.com/SonarQube)\n- [SonarSource](https://www.sonarsource.com), author of SonarQube\n- [Issue tracking](https://jira.sonarsource.com/browse/SONAR/), read-only. Only SonarSourcers can create tickets.\n- [Responsible Disclosure](https://community.sonarsource.com/t/responsible-vulnerability-disclosure/9317)\n- [Next](https://next.sonarqube.com/sonarqube) instance of the next SonarQube version\n\n## Have Question or Feedback?\n\nFor support questions (\"How do I?\", \"I got this error, why?\", ...), please first read the [documentation](https://docs.sonarqube.org) and then head to the [SonarSource Community](https://community.sonarsource.com/c/help/sq/10). The answer to your question has likely already been answered! \ud83e\udd13\n\nBe aware that this forum is a community, so the standard pleasantries (\"Hi\", \"Thanks\", ...) are expected. And if you don't get an answer to your thread, you should sit on your hands for at least three days before bumping it. Operators are not standing by. \ud83d\ude04\n\n## Contributing\n\nIf you would like to see a new feature, please create a new Community thread: [\"Suggest new features\"](https://community.sonarsource.com/c/suggestions/features).\n\nPlease be aware that we are not actively looking for feature contributions. The truth is that it's extremely difficult for someone outside SonarSource to comply with our roadmap and expectations. Therefore, we typically only accept minor cosmetic changes and typo fixes.\n\nWith that in mind, if you would like to submit a code contribution, please create a pull request for this repository. Please explain your motives to contribute this change: what problem you are trying to fix, what improvement you are trying to make.\n\nMake sure that you follow our [code style](https://github.com/SonarSource/sonar-developer-toolset#code-style) and all tests are passing (Travis build is executed for each pull request).\n\nWilling to contribute to SonarSource products? We are looking for smart, passionate, and skilled people to help us build world-class code quality solutions. 
Have a look at our current [job offers here](https://www.sonarsource.com/company/jobs/)!\n\n## Building\n\nTo build sources locally follow these instructions.\n\n### Build and Run Unit Tests\n\nExecute from project base directory:\n\n ./gradlew build\n\nThe zip distribution file is generated in `sonar-application/build/distributions/`. Unzip it and start server by executing:\n\n # on linux\n bin/linux-x86-64/sonar.sh start\n # or on MacOS\n bin/macosx-universal-64/sonar.sh start\n # or on Windows\n bin\\windows-x86-64\\StartSonar.bat\n\n### Open in IDE\n\nIf the project has never been built, then build it as usual (see previous section) or use the quicker command:\n\n ./gradlew ide\n\nThen open the root file `build.gradle` as a project in Intellij or Eclipse.\n\n### Gradle Hints\n\n| ./gradlew command | Description |\n| -------------------------------- | ----------------------------------------- |\n| `dependencies` | list dependencies |\n| `licenseFormat --rerun-tasks` | fix source headers by applying HEADER.txt |\n| `wrapper --gradle-version 5.2.1` | upgrade wrapper |\n\n## License\n\nCopyright 2008-2023 SonarSource.\n\nLicensed under the [GNU Lesser General Public License, Version 3.0](https://www.gnu.org/licenses/lgpl.txt)\n", - "source_links": [], - "id": 115 - }, - { - "page_link": "https://github.com/apache/spark", - "title": "spark readme", - "text": "# Apache Spark\n\nSpark is a unified analytics engine for large-scale data processing. It provides\nhigh-level APIs in Scala, Java, Python, and R, and an optimized engine that\nsupports general computation graphs for data analysis. It also supports a\nrich set of higher-level tools including Spark SQL for SQL and DataFrames,\npandas API on Spark for pandas workloads, MLlib for machine learning, GraphX for graph processing,\nand Structured Streaming for stream processing.\n\n\n\n[![GitHub Actions Build](https://github.com/apache/spark/actions/workflows/build_main.yml/badge.svg)](https://github.com/apache/spark/actions/workflows/build_main.yml)\n[![AppVeyor Build](https://img.shields.io/appveyor/ci/ApacheSoftwareFoundation/spark/master.svg?style=plastic&logo=appveyor)](https://ci.appveyor.com/project/ApacheSoftwareFoundation/spark)\n[![PySpark Coverage](https://codecov.io/gh/apache/spark/branch/master/graph/badge.svg)](https://codecov.io/gh/apache/spark)\n[![PyPI Downloads](https://static.pepy.tech/personalized-badge/pyspark?period=month&units=international_system&left_color=black&right_color=orange&left_text=PyPI%20downloads)](https://pypi.org/project/pyspark/)\n\n\n## Online Documentation\n\nYou can find the latest Spark documentation, including a programming\nguide, on the [project web page](https://spark.apache.org/documentation.html).\nThis README file only contains basic setup instructions.\n\n## Building Spark\n\nSpark is built using [Apache Maven](https://maven.apache.org/).\nTo build Spark and its example programs, run:\n\n```bash\n./build/mvn -DskipTests clean package\n```\n\n(You do not need to do this if you downloaded a pre-built package.)\n\nMore detailed documentation is available from the project site, at\n[\"Building Spark\"](https://spark.apache.org/docs/latest/building-spark.html).\n\nFor general development tips, including info on developing Spark using an IDE, see [\"Useful Developer Tools\"](https://spark.apache.org/developer-tools.html).\n\n## Interactive Scala Shell\n\nThe easiest way to start using Spark is through the Scala shell:\n\n```bash\n./bin/spark-shell\n```\n\nTry the following command, which should 
return 1,000,000,000:\n\n```scala\nscala> spark.range(1000 * 1000 * 1000).count()\n```\n\n## Interactive Python Shell\n\nAlternatively, if you prefer Python, you can use the Python shell:\n\n```bash\n./bin/pyspark\n```\n\nAnd run the following command, which should also return 1,000,000,000:\n\n```python\n>>> spark.range(1000 * 1000 * 1000).count()\n```\n\n## Example Programs\n\nSpark also comes with several sample programs in the `examples` directory.\nTo run one of them, use `./bin/run-example [params]`. For example:\n\n```bash\n./bin/run-example SparkPi\n```\n\nwill run the Pi example locally.\n\nYou can set the MASTER environment variable when running examples to submit\nexamples to a cluster. This can be a mesos:// or spark:// URL,\n\"yarn\" to run on YARN, and \"local\" to run\nlocally with one thread, or \"local[N]\" to run locally with N threads. You\ncan also use an abbreviated class name if the class is in the `examples`\npackage. For instance:\n\n```bash\nMASTER=spark://host:7077 ./bin/run-example SparkPi\n```\n\nMany of the example programs print usage help if no params are given.\n\n## Running Tests\n\nTesting first requires [building Spark](#building-spark). Once Spark is built, tests\ncan be run using:\n\n```bash\n./dev/run-tests\n```\n\nPlease see the guidance on how to\n[run tests for a module, or individual tests](https://spark.apache.org/developer-tools.html#individual-tests).\n\nThere is also a Kubernetes integration test, see resource-managers/kubernetes/integration-tests/README.md\n\n## A Note About Hadoop Versions\n\nSpark uses the Hadoop core library to talk to HDFS and other Hadoop-supported\nstorage systems. Because the protocols have changed in different versions of\nHadoop, you must build Spark against the same version that your cluster runs.\n\nPlease refer to the build documentation at\n[\"Specifying the Hadoop Version and Enabling YARN\"](https://spark.apache.org/docs/latest/building-spark.html#specifying-the-hadoop-version-and-enabling-yarn)\nfor detailed guidance on building for a particular distribution of Hadoop, including\nbuilding for particular Hive and Hive Thriftserver distributions.\n\n## Configuration\n\nPlease refer to the [Configuration Guide](https://spark.apache.org/docs/latest/configuration.html)\nin the online documentation for an overview on how to configure Spark.\n\n## Contributing\n\nPlease review the [Contribution to Spark guide](https://spark.apache.org/contributing.html)\nfor information on how to get started contributing to the project.\n", - "source_links": [], - "id": 116 - }, - { - "page_link": null, - "title": "strapi readme", - "text": null, - "source_links": [], - "id": 117 - }, - { - "page_link": "https://github.com/apache/superset", - "title": "superset readme", - "text": "\n\n# Superset\n\n[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\n[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/apache/superset?sort=semver)](https://github.com/apache/superset/tree/latest)\n[![Build Status](https://github.com/apache/superset/workflows/Python/badge.svg)](https://github.com/apache/superset/actions)\n[![PyPI version](https://badge.fury.io/py/apache-superset.svg)](https://badge.fury.io/py/apache-superset)\n[![Coverage 
Status](https://codecov.io/github/apache/superset/coverage.svg?branch=master)](https://codecov.io/github/apache/superset)\n[![PyPI](https://img.shields.io/pypi/pyversions/apache-superset.svg?maxAge=2592000)](https://pypi.python.org/pypi/apache-superset)\n[![Get on Slack](https://img.shields.io/badge/slack-join-orange.svg)](http://bit.ly/join-superset-slack)\n[![Documentation](https://img.shields.io/badge/docs-apache.org-blue.svg)](https://superset.apache.org)\n\n\n\nA modern, enterprise-ready business intelligence web application.\n\n[**Why Superset?**](#why-superset) |\n[**Supported Databases**](#supported-databases) |\n[**Installation and Configuration**](#installation-and-configuration) |\n[**Release Notes**](RELEASING/README.md#release-notes-for-recent-releases) |\n[**Get Involved**](#get-involved) |\n[**Contributor Guide**](#contributor-guide) |\n[**Resources**](#resources) |\n[**Organizations Using Superset**](RESOURCES/INTHEWILD.md)\n\n## Why Superset?\n\nSuperset is a modern data exploration and data visualization platform. Superset can replace or augment proprietary business intelligence tools for many teams. Superset integrates well with a variety of data sources.\n\nSuperset provides:\n\n- A **no-code interface** for building charts quickly\n- A powerful, web-based **SQL Editor** for advanced querying\n- A **lightweight semantic layer** for quickly defining custom dimensions and metrics\n- Out of the box support for **nearly any SQL** database or data engine\n- A wide array of **beautiful visualizations** to showcase your data, ranging from simple bar charts to geospatial visualizations\n- Lightweight, configurable **caching layer** to help ease database load\n- Highly extensible **security roles and authentication** options\n- An **API** for programmatic customization\n- A **cloud-native architecture** designed from the ground up for scale\n\n## Screenshots & Gifs\n\n**Large Gallery of Visualizations**\n\n
\n\n**Craft Beautiful, Dynamic Dashboards**\n\n
\n\n**No-Code Chart Builder**\n\n
\n\n**Powerful SQL Editor**\n\n
\n\n## Supported Databases\n\nSuperset can query data from any SQL-speaking datastore or data engine (Presto, Trino, Athena, [and more](https://superset.apache.org/docs/databases/installing-database-drivers/)) that has a Python DB-API driver and a SQLAlchemy dialect.\n\nHere are some of the major database solutions that are supported:\n\n

\nAmazon Redshift, Google BigQuery, Snowflake, Trino, Presto, Databricks, Apache Druid, Firebolt, Timescale, Rockset, PostgreSQL, MySQL, Microsoft SQL Server, IBM Db2, SQLite, Sybase, MariaDB, Vertica, Oracle, Firebird, Greenplum, ClickHouse, Exasol, MonetDB, Apache Kylin, Hologres, Netezza, Apache Pinot, Teradata, YugabyteDB\n
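In practice, "supported" boils down to having a SQLAlchemy dialect plus a Python DB-API driver installed alongside Superset. A quick way to sanity-check that pairing is sketched below; the connection URI is a placeholder for your own datastore, not anything Superset-specific.

```python
# Sanity-check that a SQLAlchemy dialect and DB-API driver are installed for
# your database; the URI below is a placeholder, not something Superset defines.
from sqlalchemy import create_engine, text

engine = create_engine("postgresql+psycopg2://user:password@db-host:5432/analytics")
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())  # prints 1 if the pairing works
```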

\n\n**A more comprehensive list of supported databases** along with the configuration instructions can be found [here](https://superset.apache.org/docs/databases/installing-database-drivers).\n\nWant to add support for your datastore or data engine? Read more [here](https://superset.apache.org/docs/frequently-asked-questions#does-superset-work-with-insert-database-engine-here) about the technical requirements.\n\n## Installation and Configuration\n\n[Extended documentation for Superset](https://superset.apache.org/docs/installation/installing-superset-using-docker-compose)\n\n## Get Involved\n\n- Ask and answer questions on [StackOverflow](https://stackoverflow.com/questions/tagged/apache-superset) using the **apache-superset** tag\n- [Join our community's Slack](http://bit.ly/join-superset-slack)\n and please read our [Slack Community Guidelines](https://github.com/apache/superset/blob/master/CODE_OF_CONDUCT.md#slack-community-guidelines)\n- [Join our dev@superset.apache.org Mailing list](https://lists.apache.org/list.html?dev@superset.apache.org)\n\n## Contributor Guide\n\nInterested in contributing? Check out our\n[CONTRIBUTING.md](https://github.com/apache/superset/blob/master/CONTRIBUTING.md)\nto find resources around contributing along with a detailed guide on\nhow to set up a development environment.\n\n## Resources\n\nSuperset 2.0!\n- [Superset 2.0 Meetup](https://preset.io/events/superset-2-0-meetup/)\n- [Superset 2.0 Release Notes](https://github.com/apache/superset/tree/master/RELEASING/release-notes-2-0)\n\nUnderstanding the Superset Points of View\n- [The Case for Dataset-Centric Visualization](https://preset.io/blog/dataset-centric-visualization/)\n- [Understanding the Superset Semantic Layer](https://preset.io/blog/understanding-superset-semantic-layer/)\n\n\n- Getting Started with Superset\n - [Superset in 2 Minutes using Docker Compose](https://superset.apache.org/docs/installation/installing-superset-using-docker-compose#installing-superset-locally-using-docker-compose)\n - [Installing Database Drivers](https://superset.apache.org/docs/databases/docker-add-drivers/)\n - [Building New Database Connectors](https://preset.io/blog/building-database-connector/)\n - [Create Your First Dashboard](https://superset.apache.org/docs/creating-charts-dashboards/first-dashboard)\n - [Comprehensive Tutorial for Contributing Code to Apache Superset\n ](https://preset.io/blog/tutorial-contributing-code-to-apache-superset/)\n- [Resources to master Superset by Preset](https://preset.io/resources/)\n\n- Deploying Superset\n - [Official Docker image](https://hub.docker.com/r/apache/superset)\n - [Helm Chart](https://github.com/apache/superset/tree/master/helm/superset)\n\n- Recordings of Past [Superset Community Events](https://preset.io/events)\n - [Mixed Time Series Charts](https://preset.io/events/mixed-time-series-visualization-in-superset-workshop/) \n - [How the Bing Team Customized Superset for the Internal Self-Serve Data & Analytics Platform](https://preset.io/events/how-the-bing-team-heavily-customized-superset-for-their-internal-data/)\n - [Live Demo: Visualizing MongoDB and Pinot Data using Trino](https://preset.io/events/2021-04-13-visualizing-mongodb-and-pinot-data-using-trino/)\n\t- [Introduction to the Superset API](https://preset.io/events/introduction-to-the-superset-api/)\n\t- [Building a Database Connector for Superset](https://preset.io/events/2021-02-16-building-a-database-connector-for-superset/)\n\n- Visualizations\n - [Creating Viz 
Plugins](https://superset.apache.org/docs/contributing/creating-viz-plugins/)\n - [Managing and Deploying Custom Viz Plugins](https://medium.com/nmc-techblog/apache-superset-manage-custom-viz-plugins-in-production-9fde1a708e55)\n - [Why Apache Superset is Betting on Apache ECharts](https://preset.io/blog/2021-4-1-why-echarts/)\n\n- [Superset API](https://superset.apache.org/docs/rest-api)\n", - "source_links": [], - "id": 118 - }, - { - "page_link": "pip-packages.md", - "title": "pip-packages", - "text": "## Adding additional pip packages\n\nSuperset doesn't come pre-installed with all drivers you might need for your visualizations. To add additional db drivers, the most stable solution is to extend our docker image, which is relatively easy to do.\n\n### Build your extended image\n\nFirst copy the dockerfile here https://github.com/pluralsh/plural-artifacts/blob/main/superset/Dockerfile to wherever you want to manage your image. You'll also want to copy the requirements.txt in the same subfolder, and add whatever additional packages you want to it. Build the image then push it to your registry of choice to use it in the next step.\n\n### Wire Superset with this image\n\n\nYou'll then want to edit `superset/helm/superset/values.yaml` in your installation repo with something like:\n\n```yaml\nsuperset:\n superset:\n image:\n repository: your.docker.repository\n tag: your-tag\n```\n\nAlternatively, you should be able to do this in the configuration section for superset in your plural console as well.\n\n### redeploy\n\nfrom there you can simply run `plural build --only airflow && plural deploy --commit \"using custom docker image\"` to set this up", - "source_links": [], - "id": 119 - }, - { - "page_link": "https://github.com/grafana/tempo", - "title": "tempo readme", - "text": "

\"Tempo

\n

\n \"Latest\n \"License\"\n \"Docker\n \"Slack\"\n \"Community\n \"Go\n

\n\nGrafana Tempo is an open source, easy-to-use and high-scale distributed tracing backend. Tempo is cost-efficient, requiring only object storage to operate, and is deeply integrated with Grafana, Prometheus, and Loki.\n\nTempo is Jaeger, Zipkin, Kafka, OpenCensus and OpenTelemetry compatible. It ingests batches in any of the mentioned formats, buffers them and then writes them to Azure, GCS, S3 or local disk. As such it is robust, cheap and easy to operate!\n\nTempo implements [TraceQL](https://grafana.com/docs/tempo/latest/traceql/), a traces-first query language inspired by LogQL and PromQL. This query language allows users to very precisely and easily select spans and jump directly to the spans fulfilling the specified conditions:\n\n

\"Tempo

\n\n## Getting Started\n\n- [Get started documentation](https://grafana.com/docs/tempo/latest/getting-started/)\n- [Deployment Examples](./example)\n - [Docker Compose](./example/docker-compose)\n - [Helm](./example/helm)\n - [Jsonnet](./example/tk)\n\n## Further Reading\n\nTo learn more about Tempo, consult the following documents & talks:\n\n- [New in Grafana Tempo 2.0: Apache Parquet as the default storage format, support for TraceQL][tempo_20_announce]\n- [Get to know TraceQL: A powerful new query language for distributed tracing][traceql-post]\n\n[tempo_20_announce]: https://grafana.com/blog/2023/02/01/new-in-grafana-tempo-2.0-apache-parquet-as-the-default-storage-format-support-for-traceql/\n[traceql-post]: https://grafana.com/blog/2023/02/07/get-to-know-traceql-a-powerful-new-query-language-for-distributed-tracing/\n\n## Getting Help\n\nIf you have any questions or feedback regarding Tempo:\n\n- Grafana Labs hosts a [forum](https://community.grafana.com/c/grafana-tempo/40) for Tempo. This is a great place to post questions and search for answers.\n- Ask a question on the [Tempo Slack channel](https://grafana.slack.com/archives/C01D981PEE5).\n- [File an issue](https://github.com/grafana/tempo/issues/new/choose) for bugs, issues and feature suggestions.\n- UI issues should be filed with [Grafana](https://github.com/grafana/grafana/issues/new/choose).\n\n## OpenTelemetry\n\nTempo's receiver layer, wire format and storage format are all based directly on [standards](https://github.com/open-telemetry/opentelemetry-proto) and [code](https://github.com/open-telemetry/opentelemetry-collector) established by [OpenTelemetry](https://opentelemetry.io/). We support open standards at Grafana!\n\nCheck out the [Integration Guides](https://grafana.com/docs/tempo/latest/guides/instrumentation/) to see examples of OpenTelemetry instrumentation with Tempo.\n\n## Other Components\n\n### tempo-vulture\n[tempo-vulture](https://github.com/grafana/tempo/tree/main/cmd/tempo-vulture) is Tempo's bird themed consistency checking tool. It writes traces to Tempo and then queries them back in a variety of ways.\n\n### tempo-cli\n[tempo-cli](https://github.com/grafana/tempo/tree/main/cmd/tempo-cli) is the place to put any utility functionality related to Tempo. See [Documentation](https://grafana.com/docs/tempo/latest/operations/tempo_cli/) for more info.\n\n## License\n\nGrafana Tempo is distributed under [AGPL-3.0-only](LICENSE). For Apache-2.0 exceptions, see [LICENSING.md](LICENSING.md).\n", - "source_links": [], - "id": 120 - }, - { - "page_link": null, - "title": "terraria readme", - "text": null, - "source_links": [], - "id": 121 - }, - { - "page_link": null, - "title": "test-harness readme", - "text": null, - "source_links": [], - "id": 122 - }, - { - "page_link": null, - "title": "test repo readme", - "text": null, - "source_links": [], - "id": 123 - }, - { - "page_link": null, - "title": "test-repo-3 readme", - "text": null, - "source_links": [], - "id": 124 - }, - { - "page_link": "https://github.com/tierrun/tier", - "title": "tier readme", - "text": "


\n\n\n# Pricing as Code\n\n`tier` is a tool that lets you define and manage your SaaS application's pricing model in one place (pricing.json). \n\nTier will handle setting up and managing Stripe in a way that is much more friendly for SaaS and consumption based billing models. Tier's SDK can then be implemented for access checking, metering/reporting, and more.\n\n [![GPLv3 License](https://img.shields.io/github/license/tierrun/tier?style=for-the-badge)](https://opensource.org/licenses/)\n [![Commit Activity](https://img.shields.io/github/commit-activity/m/tierrun/tier?style=for-the-badge)]()\n [![Discussions](https://img.shields.io/github/discussions/tierrun/tier?style=for-the-badge\n )](https://github.com/tierrun/tier/discussions)\n [![](https://img.shields.io/github/go-mod/go-version/tierrun/tier?style=for-the-badge\n )]()\n\n\n## Docs and Community\n- [Documentation is available here](https://docs.tier.run/)\n- Join our Slack here: [](https://join.slack.com/t/tier-community/shared_invite/zt-1blotqjb9-wvkYMo8QkhaEWziprdjnIA)\n\n# Key Features and Capabilities\n- Manage your features, plans and their pricing in one place\n- On demand test environments and preview deployments allow you to work with confidence\n- Create custom plans and variants as needed for specific customers or tests\n- Stripe is kept in sync and fully managed by Tier\n- Access Checking and Entitlements are handled by the Tier SDKs \n\n## How to use Tier\n\n1. [Install Tier CLI](#install)\n2. [Create your first pricing.json](https://model.tier.run) and `tier push` to your dev or live environment\n3. [Get a Tier SDK and add it](https://www.tier.run/docs/sdk/) to enable Access Checks and Metering\n\nYou can see and example here: [Tier Hello World!](https://blog.tier.run/tier-hello-world-demo)\n\n


\n\n## Install\n\n### Homebrew (macOS)\n\n```\nbrew install tierrun/tap/tier\n```\n### Binary (macOS, linux, Windows)\n\nBinaries for major architectures can be found at [here](https://tier.run/releases).\n\n### Go (most operating systems and architectures)\n\nIf go1.19 or later is installed, running or installing tier like:\n\n```\ngo run tier.run/cmd/tier@latest\n```\n\nor\n\n```\ngo install tier.run/cmd/tier@latest\n```\n\n\n## Authors\n\n- [@bmizerany](https://www.github.com/bmizerany)\n- [@isaacs](https://www.github.com/isaacs)\n- [@jevon](https://www.github.com/jevon)\n\n", - "source_links": [], - "id": 125 - }, - { - "page_link": "https://github.com/trytouca/trytouca", - "title": "touca readme", - "text": "

\n\"touca.io\"\n

\n

\n\n\n\n

\n\n## Continuous Regression Testing for Engineering Teams\n\nTouca provides feedback when you write code that could break your software. It\nremotely compares the behavior and performance of your software against a\nprevious trusted version and visualizes differences in near real-time.\n\n[![Touca Server](https://i.vimeocdn.com/filter/overlay?src0=https%3A%2F%2Fi.vimeocdn.com%2Fvideo%2F1420276355-a2760e21742b267f63e7e1599eefc02329dcc22c2f155f125ff8692c99161e9c-d_1920x1080&src1=http%3A%2F%2Ff.vimeocdn.com%2Fp%2Fimages%2Fcrawler_play.png)](https://vimeo.com/703039452 \"Touca Quick Product Demo\")\n\n## Start for free\n\n[![Server](https://img.shields.io/docker/v/touca/touca)](https://hub.docker.com/r/touca/touca)\n\n### Option 1: Self-host locally\n\nYou can self-host Touca by running the following command on a UNIX machine with\nat least 2GB of RAM, with Docker and Docker Compose installed.\n\n```bash\n/bin/bash -c \"$(curl -fsSL https://touca.io/install.sh)\"\n```\n\n### Option 2: Use Touca Cloud\n\nOr you can use https://app.touca.io that we manage and maintain. We have a free\nplan and offer additional features suitable for larger teams.\n\n## Sneak Peek\n\n> Touca has SDKs in Python, C++, Java, and JavaScript.\n\n[![C++ SDK](https://img.shields.io/static/v1?label=C%2B%2B&message=v1.5.2&color=blue)](https://github.com/trytouca/trytouca/tree/main/sdk/cpp)\n[![Python SDK](https://img.shields.io/pypi/v/touca?label=Python&color=blue)](https://pypi.org/project/touca/)\n[![JavaScript SDK](https://img.shields.io/npm/v/@touca/node?label=JavaScript&color=blue)](https://www.npmjs.com/package/@touca/node)\n[![Java SDK](https://img.shields.io/maven-central/v/io.touca/touca?label=Java&color=blue)](https://search.maven.org/artifact/io.touca/touca)\n\nLet us imagine that we want to test a software workflow that takes the username\nof a student and provides basic information about them.\n\n```python\n@dataclass\nclass Student:\n username: str\n fullname: str\n dob: datetime.date\n gpa: float\n\ndef find_student(username: str) -> Student:\n # ...\n```\n\nWe can use unit testing in which we hard-code a set of usernames and list our\nexpected return value for each input. In this example, the input and output of\nour code under test are `username` and `Student`. If we were testing a video\ncompression algorithm, they may have been video files. 
In that case:\n\n- Describing the expected output for a given video file would be difficult.\n- When we make changes to our compression algorithm, accurately reflecting those\n changes in our expected values would be time-consuming.\n- We would need a large number of input video files to gain confidence that our\n algorithm works correctly.\n\nTouca makes it easier to continuously test workflows of any complexity and with\nany number of test cases.\n\n```python\nimport touca\nfrom students import find_student\n\n@touca.workflow\ndef students_test(username: str):\n student = find_student(username)\n touca.check(\"username\", student.username)\n touca.check(\"fullname\", student.fullname)\n touca.check(\"birth_date\", student.dob)\n touca.check(\"gpa\", student.gpa)\n```\n\nThis is slightly different from a typical unit test:\n\n- Touca tests do not use expected values.\n- Touca tests do not hard-code input values.\n\nWith Touca, we describe how we run our code under test for any given test case.\nWe can capture values of interesting variables and runtime of important\nfunctions to describe the behavior and performance of our workflow for that test\ncase.\n\nWe can run Touca tests with any number of inputs from the command line:\n\n```bash\ntouca config set api-key=\"\"\ntouca config set api-url=\"https://api.touca.io/@/tutorial\"\ntouca test --revision=1.0 --testcase alice bob charlie\n```\n\nThis command produces the following output:\n\n```text\n\nTouca Test Framework\n\nSuite: students_test/1.0\n\n 1. SENT alice (0 ms)\n 2. SENT bob (0 ms)\n 3. SENT charlie (0 ms)\n\nTests: 3 submitted, 3 total\nTime: 0.39 s\n\n\u2728 Ran all test suites.\n\n```\n\nNow if we make changes to our workflow under test, we can rerun this test and\nrely on Touca to check if our changes affect the behavior or performance of our\nsoftware.\n\nUnlike integration tests, we are not bound to the output of our workflow. We can\ncapture any number of data points and from anywhere within our code. This is\nspecially useful if our workflow has multiple stages. We can capture the output\nof each stage without publicly exposing its API. When any stage changes behavior\nin a future version of our software, our captured data points will help find the\nroot cause more easily.\n\n## Value Proposition\n\nTouca is very effective in addressing common problems in the following\nsituations:\n\n- When we need to test our workflow with a large number of inputs.\n- When the output of our workflow is too complex, or too difficult to describe\n in our unit tests.\n- When interesting information to check for regression is not exposed through\n the interface of our workflow.\n\nThe highlighted design features of Touca can help us test these workflows at any\nscale.\n\n- Decoupling our test input from our test logic helps us manage our long list of\n inputs without modifying the test logic. Managing that list on a remote server\n accessible to all members of our team helps us add notes to each test case,\n explain why they are needed and track their stability and performance changes\n over time.\n- Submitting our test results to a remote server, instead of storing them in\n files, helps us avoid the mundane tasks of managing and processing of test\n results. Touca server retains all test results and makes them accessible to\n all members of the team. It compares test results using their original data\n types and reports discovered differences in real-time to all interested\n members of our team. 
It helps us audit how our software evolves over time and\n provides high-level information about our tests.\n\n## Documentation\n\nIf you are new to Touca, the best place to start is our\n[documentation website](https://touca.io/docs).\n\n## Community\n\nWe hang on [Discord](https://touca.io/discord). Come say hi! We love making new\nfriends. If you need help, have any questions, or like to contribute or provide\nfeedback, that's the best place to be.\n\n## Contributors\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\n\n\n\n## License\n\nThis repository is released under the Apache-2.0 License. See\n[`LICENSE`](https://github.com/trytouca/trytouca/blob/main/LICENSE).\n", - "source_links": [], - "id": 126 - }, - { - "page_link": null, - "title": "trace-shield readme", - "text": null, - "source_links": [], - "id": 127 - }, - { - "page_link": "https://github.com/trinodb/trino", - "title": "trino readme", - "text": "

\nTrino is a fast distributed SQL query engine for big data analytics.\n\nSee the User Manual for deployment instructions and end user documentation.\n
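As a quick illustration of what an end user does once a server is up, here is a minimal sketch using the Trino Python client (`pip install trino`); the host, port, user and TPCH catalog are assumptions matching the development setup described below.

```python
# Query a running Trino server from Python with the Trino client
# (pip install trino). Host, port, user and the TPCH catalog are assumptions
# matching the development setup described below.
import trino

conn = trino.dbapi.connect(
    host="localhost",
    port=8080,
    user="dev",
    catalog="tpch",
    schema="tiny",
)
cur = conn.cursor()
cur.execute("SELECT name FROM region")
for (name,) in cur.fetchall():
    print(name)
```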
\n\n## Development\n\nSee [DEVELOPMENT](.github/DEVELOPMENT.md) for information about code style,\ndevelopment process, and guidelines.\n\nSee [CONTRIBUTING](.github/CONTRIBUTING.md) for contribution requirements.\n\n## Security\n\nSee the project [security policy](.github/SECURITY.md) for\ninformation about reporting vulnerabilities.\n\n## Build requirements\n\n* Mac OS X or Linux\n* Java 17.0.4+, 64-bit\n* Docker\n\n## Building Trino\n\nTrino is a standard Maven project. Simply run the following command from the\nproject root directory:\n\n ./mvnw clean install -DskipTests\n\nOn the first build, Maven downloads all the dependencies from the internet\nand caches them in the local repository (`~/.m2/repository`), which can take a\nwhile, depending on your connection speed. Subsequent builds are faster.\n\nTrino has a comprehensive set of tests that take a considerable amount of time\nto run, and are thus disabled by the above command. These tests are run by the\nCI system when you submit a pull request. We recommend only running tests\nlocally for the areas of code that you change.\n\n## Running Trino in your IDE\n\n### Overview\n\nAfter building Trino for the first time, you can load the project into your IDE\nand run the server. We recommend using\n[IntelliJ IDEA](http://www.jetbrains.com/idea/). Because Trino is a standard\nMaven project, you easily can import it into your IDE. In IntelliJ, choose\n*Open Project* from the *Quick Start* box or choose *Open*\nfrom the *File* menu and select the root `pom.xml` file.\n\nAfter opening the project in IntelliJ, double check that the Java SDK is\nproperly configured for the project:\n\n* Open the File menu and select Project Structure\n* In the SDKs section, ensure that JDK 17 is selected (create one if none exist)\n* In the Project section, ensure the Project language level is set to 17\n\n### Running a testing server\n\nThe simplest way to run Trino for development is to run the `TpchQueryRunner`\nclass. It will start a development version of the server that is configured with\nthe TPCH connector. You can then use the CLI to execute queries against this\nserver. Many other connectors have their own `*QueryRunner` class that you can\nuse when working on a specific connector.\n\n### Running the full server\n\nTrino comes with sample configuration that should work out-of-the-box for\ndevelopment. Use the following options to create a run configuration:\n\n* Main Class: `io.trino.server.DevelopmentServer`\n* VM Options: `-ea -Dconfig=etc/config.properties -Dlog.levels-file=etc/log.properties -Djdk.attach.allowAttachSelf=true`\n* Working directory: `$MODULE_DIR$`\n* Use classpath of module: `trino-server-dev`\n\nThe working directory should be the `trino-server-dev` subdirectory. In\nIntelliJ, using `$MODULE_DIR$` accomplishes this automatically.\n\nIf `VM options` doesn't exist in the dialog, you need to select `Modify options`\nand enable `Add VM options`.\n\n### Running the CLI\n\nStart the CLI to connect to the server and run SQL queries:\n\n client/trino-cli/target/trino-cli-*-executable.jar\n\nRun a query to see the nodes in the cluster:\n\n SELECT * FROM system.runtime.nodes;\n\nRun a query against the TPCH connector:\n\n SELECT * FROM tpch.tiny.region;\n", - "source_links": [], - "id": 128 - }, - { - "page_link": "adding-catalogs.md", - "title": "adding-catalogs", - "text": "# Adding Data Catalogs\n\nTrino doesn't come with a natural way to preconfigure datasources, but it's relatively easy to do through helm. 
You simply need to edit `trino/helm/trino/values.yaml` and add something like:\n\n```yaml\ntrino:\n additionalCatalogs:\n lakehouse.properties: |-\n connector.name=iceberg\n hive.metastore.uri=thrift://example.net:9083\n rdbms.properties: |-\n connector.name=postgresql\n connection-url=jdbc:postgresql://example.net:5432/database\n connection-user=root\n connection-password=secret\n```\n\n(note this is an encrypted file in your repo so safe to edit however you want).\n\nIn the console, you can simply use trino's configuration tab at Configuration -> Helm\n", - "source_links": [], - "id": 129 - }, - { - "page_link": "https://github.com/aquasecurity/trivy-operator", - "title": "trivy readme", - "text": "![Trivy-operator logo](docs/images/trivy-operator-logo.png)\n\n> Kubernetes-native security toolkit. ([Documentation](https://aquasecurity.github.io/trivy-operator/latest))\n\n[![GitHub Release][release-img]][release]\n[![Build Action][action-build-img]][action-build]\n[![Release snapshot Action][action-release-snapshot-img]][action-release-snapshot]\n[![Go Report Card][report-card-img]][report-card]\n[![License][license-img]][license]\n[![GitHub All Releases][github-all-releases-img]][release]\n![Docker Pulls Trivy-operator][docker-pulls-trivy-operator]\n\n\n\n[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/trivy-operator)](https://artifacthub.io/packages/search?repo=trivy-operator)\n\n# Introduction\n\nThere are lots of security tools in the cloud native world, created by Aqua and by others, for identifying and informing\nusers about security issues in Kubernetes workloads and infrastructure components. However powerful and useful they\nmight be, they tend to sit alongside Kubernetes, with each new product requiring users to learn a separate set of\ncommands and installation steps in order to operate them and find critical security information.\n\nThe Trivy-Operator leverages trivy security tools by incorporating their outputs into Kubernetes CRDs\n(Custom Resource Definitions) and from there, making security reports accessible through the Kubernetes API. This way\nusers can find and view the risks that relate to different resources in what we call a Kubernetes-native way.\n\nThe Trivy operator automatically updates security reports in response to workload and other changes on a Kubernetes cluster, generating the following reports:\n\n- Vulnerability Scans: Automated vulnerability scanning for Kubernetes workloads.\n- ConfigAudit Scans: Automated configuration audits for Kubernetes resources with predefined rules or custom Open Policy Agent (OPA) policies.\n- Exposed Secret Scans: Automated secret scans which find and detail the location of exposed Secrets within your cluster.\n- RBAC scans: Role Based Access Control scans provide detailed information on the access rights of the different resources installed.\n\n
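Because each report is a plain Kubernetes custom resource, it can be read with any Kubernetes client. The sketch below uses the official Python client; the `aquasecurity.github.io/v1alpha1` group/version matches the current CRDs, but the exact report layout may differ between operator versions, hence the defensive lookups.

```python
# Read VulnerabilityReports straight from the Kubernetes API with the official
# Python client. The report layout may vary between operator versions, so
# fields are accessed defensively with .get().
from kubernetes import client, config

config.load_kube_config()  # use load_incluster_config() when running in-cluster
api = client.CustomObjectsApi()

reports = api.list_namespaced_custom_object(
    group="aquasecurity.github.io",
    version="v1alpha1",
    namespace="default",
    plural="vulnerabilityreports",
)
for item in reports.get("items", []):
    summary = item.get("report", {}).get("summary", {})
    print(item["metadata"]["name"], summary)
```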

\n\"Trivy-operator\n

\n\n# Status\n\nAlthough we are trying to keep new releases backward compatible with previous versions, this project is still incubating,\nand some APIs and [Custom Resource Definitions] may change.\n\n# Usage\n\nThe official [Documentation] provides detailed installation, configuration, troubleshooting, and quick start guides.\n\nYou can install the Trivy-operator Operator with [Static YAML Manifests] and follow the [Getting Started][getting-started-operator]\nguide to see how vulnerability and configuration audit reports are generated automatically.\n\n# Contributing\n\nAt this early stage we would love your feedback on the overall concept of Trivy-Operator. Over time, we'd love to see\ncontributions integrating different security tools so that users can access security information in standard,\nKubernetes-native ways.\n\n* See [Contributing] for information about setting up your development environment, and the contribution workflow that\n we expect.\n* Please ensure that you are following our [Code Of Conduct](https://github.com/aquasecurity/community/blob/main/CODE_OF_CONDUCT.md) during any interaction with the Aqua projects and their community.\n\n---\nTrivy-Operator is an [Aqua Security](https://aquasec.com) open source project. \nLearn about our [Open Source Work and Portfolio]. \nJoin the community, and talk to us about any matter in [GitHub Discussions] or [Slack].\n\n[release-img]: https://img.shields.io/github/release/aquasecurity/trivy-operator.svg?logo=github\n[release]: https://github.com/aquasecurity/trivy-operator/releases\n[action-build-img]: https://github.com/aquasecurity/trivy-operator/actions/workflows/build.yaml/badge.svg\n[action-build]: https://github.com/aquasecurity/trivy-operator/actions/workflows/build.yaml\n[action-release-snapshot-img]: https://github.com/aquasecurity/trivy-operator/actions/workflows/release-snapshot.yaml/badge.svg\n[action-release-snapshot]: https://github.com/aquasecurity/trivy-operator/actions/workflows/release-snapshot.yaml\n[cov-img]: https://codecov.io/github/aquasecurity/trivy-operator/branch/main/graph/badge.svg\n[cov]: https://codecov.io/github/aquasecurity/trivy-operator\n[report-card-img]: https://goreportcard.com/badge/github.com/aquasecurity/trivy-operator\n[report-card]: https://goreportcard.com/report/github.com/aquasecurity/trivy-operator\n[license-img]: https://img.shields.io/github/license/aquasecurity/trivy-operator.svg\n[license]: https://github.com/aquasecurity/trivy-operator/blob/main/LICENSE\n[github-all-releases-img]: https://img.shields.io/github/downloads/aquasecurity/trivy-operator/total?logo=github\n[docker-pulls-trivy-operator]: https://img.shields.io/docker/pulls/aquasec/trivy-operator?logo=docker&label=docker%20pulls%20%2F%20trivy%20operator\n[Contributing]: CONTRIBUTING.md\n[GitHub Discussions]: https://github.com/aquasecurity/trivy-operator/discussions\n[Slack]: https://slack.aquasec.com/\n[Open Source Work and Portfolio]: https://www.aquasec.com/products/open-source-projects/\n\n[Custom Resource Definitions]: https://aquasecurity.github.io/trivy-operator/latest/crds\n[Go module]: https://pkg.go.dev/github.com/aquasecurity/trivy-operator/pkg\n[Documentation]: https://aquasecurity.github.io/trivy-operator/latest\n[Static YAML Manifests]: https://aquasecurity.github.io/trivy-operator/latest/operator/installation/kubectl\n[getting-started-operator]: https://aquasecurity.github.io/trivy-operator/latest/operator\n[Kubernetes operator]: https://aquasecurity.github.io/trivy-operator/latest\n\n[Lens Extension]: 
https://github.com/aquasecurity/trivy-operator-lens-extension\n[kubectl]: https://kubernetes.io/docs/reference/kubectl\n", - "source_links": [], - "id": 130 - }, - { - "page_link": "https://github.com/typesense/typesense", - "title": "typesense readme", - "text": "

\nTypesense is a fast, typo-tolerant search engine for building delightful search experiences.\n\nAn Open Source Algolia Alternative & An Easier-to-Use ElasticSearch Alternative\n\nWebsite | Documentation | Roadmap | Slack Community | Twitter | Office Hours\n
\n\n\u2728 Here are a couple of **live demos** that show Typesense in action on large datasets:\n\n- Search a 32M songs dataset from MusicBrainz: [songs-search.typesense.org](https://songs-search.typesense.org/)\n- Search a 28M books dataset from OpenLibrary: [books-search.typesense.org](https://books-search.typesense.org/)\n- Search a 2M recipe dataset from RecipeNLG: [recipe-search.typesense.org](https://recipe-search.typesense.org/)\n- Search 1M Git commit messages from the Linux Kernel: [linux-commits-search.typesense.org](https://linux-commits-search.typesense.org/)\n- Spellchecker with type-ahead, with 333K English words: [spellcheck.typesense.org](https://spellcheck.typesense.org/)\n- An E-Commerce Store Browsing experience: [ecommerce-store.typesense.org](https://ecommerce-store.typesense.org/)\n- GeoSearch / Browsing experience: [airbnb-geosearch.typesense.org](https://airbnb-geosearch.typesense.org/)\n- Search / Browse xkcd comics by topic: [xkcd-search.typesense.org](https://xkcd-search.typesense.org/)\n\n\ud83d\udde3\ufe0f \ud83c\udfa5 If you prefer watching videos:\n\n- Here's one where we introduce Typesense and show a walk-through: https://youtu.be/F4mB0x_B1AE?t=144\n- Here's our [roadmap](https://github.com/orgs/typesense/projects/1) call from Q1 2022: https://aviyel.com/events/297/typesense-community-call-q1-2022-roadmap-and-contributor-spotlight\n- Check out Typesense's recent mention during Google I/O Developer Keynote: https://youtu.be/qBkyU1TJKDg?t=2399\n\n## Quick Links\n\n- [Features](#features)\n- [Benchmarks](#benchmarks)\n- [Roadmap](#roadmap)\n- [Who's using this](#whos-using-this)\n- [Install](#install)\n- [Quick Start](#quick-start)\n- [Step-by-step Walk-through](#step-by-step-walk-through)\n- [API Documentation](#api-documentation)\n- [API Clients](#api-clients)\n- [Search UI Components](#search-ui-components)\n- [FAQ](#faq)\n- [Support](#support)\n- [Contributing](#contributing)\n- [Getting Latest Updates](#getting-latest-updates)\n- [Build from Source](#build-from-source)\n\n## Features\n\n- **Typo Tolerance:** Handles typographical errors elegantly, out-of-the-box.\n- **Simple and Delightful:** Simple to set-up, integrate with, operate and scale.\n- **\u26a1 Blazing Fast:** Built in C++. 
Meticulously architected from the ground-up for low-latency (<50ms) instant searches.\n- **Tunable Ranking:** Easy to tailor your search results to perfection.\n- **Sorting:** Sort results based on a particular field at query time (helpful for features like \"Sort by Price (asc)\").\n- **Faceting & Filtering:** Drill down and refine results.\n- **Grouping & Distinct:** Group similar results together to show more variety.\n- **Federated Search:** Search across multiple collections (indices) in a single HTTP request.\n- **Geo Search:** Search and sort by results around a geographic location.\n- **Vector search:** support for both exact & HNSW-based approximate vector searching.\n- **Scoped API Keys:** Generate API keys that only allow access to certain records, for multi-tenant applications.\n- **Synonyms:** Define words as equivalents of each other, so searching for a word will also return results for the synonyms defined.\n- **Curation & Merchandizing:** Boost particular records to a fixed position in the search results, to feature them.\n- **Raft-based Clustering:** Setup a distributed cluster that is highly available.\n- **Seamless Version Upgrades:** As new versions of Typesense come out, upgrading is as simple as swapping out the binary and restarting Typesense.\n- **No Runtime Dependencies:** Typesense is a single binary that you can run locally or in production with a single command.\n\n**Don't see a feature on this list?** Search our issue tracker if someone has already requested it and add a comment to it explaining your use-case, or open a new issue if not. We prioritize our roadmap based on user feedback, so we'd love to hear from you. \n\n## Roadmap\n\nHere's Typesense's public roadmap: [https://github.com/orgs/typesense/projects/1](https://github.com/orgs/typesense/projects/1).\n\nThe first column also explains how we prioritize features, how you can influence prioritization and our release cadence. \n\n## Benchmarks\n\n- A dataset containing **2.2 Million recipes** (recipe names and ingredients):\n - Took up about 900MB of RAM when indexed in Typesense\n - Took 3.6mins to index all 2.2M records\n - On a server with 4vCPUs, Typesense was able to handle a concurrency of **104 concurrent search queries per second**, with an average search processing time of **11ms**.\n- A dataset containing **28 Million books** (book titles, authors and categories):\n - Took up about 14GB of RAM when indexed in Typesense\n - Took 78mins to index all 28M records\n - On a server with 4vCPUs, Typesense was able to handle a concurrency of **46 concurrent search queries per second**, with an average search processing time of **28ms**.\n- With a dataset containing **3 Million products** (Amazon product data), Typesense was able to handle a throughput of **250 concurrent search queries per second** on an 8-vCPU 3-node Highly Available Typesense cluster.\n\nWe'd love to benchmark with larger datasets, if we can find large ones in the public domain. If you have any suggestions for structured datasets that are open, please let us know by opening an issue. We'd also be delighted if you're able to share benchmarks from your own large datasets. Please send us a PR! \n\n## Who's using this?\n\nTypesense is used by a range of users across different industries. 
We've only recently started documenting who's using it in our [Showcase](SHOWCASE.md).\n\nIf you'd like to be included in the list, please feel free to edit [SHOWCASE.md](SHOWCASE.md) and send us a PR.\n\nYou'll also see a list of user logos on the [Typesense Cloud](https://cloud.typesense.org) home page.\n\n## Install\n\n**Option 1:** You can download the [binary packages](https://typesense.org/downloads) that we publish for \nLinux (x86-64) and Mac.\n\n**Option 2:** You can also run Typesense from our [official Docker image](https://hub.docker.com/r/typesense/typesense).\n\n**Option 3:** Spin up a managed cluster with [Typesense Cloud](https://cloud.typesense.org):\n\n\"Deploy \n\n## Quick Start\n\nHere's a quick example showcasing how you can create a collection, index a document and search it on Typesense.\n \nLet's begin by starting the Typesense server via Docker:\n\n```\ndocker run -p 8108:8108 -v/tmp/data:/data typesense/typesense:0.24.1 --data-dir /data --api-key=Hu52dwsas2AdxdE\n```\n\nWe have [API Clients](#api-clients) in a couple of languages, but let's use the Python client for this example.\n\nInstall the Python client for Typesense:\n \n```\npip install typesense\n```\n\nWe can now initialize the client and create a `companies` collection:\n\n```python\nimport typesense\n\nclient = typesense.Client({\n 'api_key': 'Hu52dwsas2AdxdE',\n 'nodes': [{\n 'host': 'localhost',\n 'port': '8108',\n 'protocol': 'http'\n }],\n 'connection_timeout_seconds': 2\n})\n\ncreate_response = client.collections.create({\n \"name\": \"companies\",\n \"fields\": [\n {\"name\": \"company_name\", \"type\": \"string\" },\n {\"name\": \"num_employees\", \"type\": \"int32\" },\n {\"name\": \"country\", \"type\": \"string\", \"facet\": True }\n ],\n \"default_sorting_field\": \"num_employees\"\n})\n```\n\nNow, let's add a document to the collection we just created:\n\n```python\ndocument = {\n \"id\": \"124\",\n \"company_name\": \"Stark Industries\",\n \"num_employees\": 5215,\n \"country\": \"USA\"\n}\n\nclient.collections['companies'].documents.create(document)\n```\n\nFinally, let's search for the document we just indexed:\n\n```python\nsearch_parameters = {\n 'q' : 'stork',\n 'query_by' : 'company_name',\n 'filter_by' : 'num_employees:>100',\n 'sort_by' : 'num_employees:desc'\n}\n\nclient.collections['companies'].documents.search(search_parameters)\n```\n\n**Did you notice the typo in the query text?** No big deal. Typesense handles typographic errors out-of-the-box!\n\n## Step-by-step Walk-through\n\nA step-by-step walk-through is available on our website [here](https://typesense.org/guide). \n\nThis will guide you through the process of starting up a Typesense server, indexing data in it and querying the data set. \n\n## API Documentation\n\nHere's our official API documentation, available on our website: [https://typesense.org/api](https://typesense.org/api).\n\nIf you notice any issues with the documentation or walk-through, please let us know or send us a PR here: [https://github.com/typesense/typesense-website](https://github.com/typesense/typesense-website).\n\n## API Clients\n\nWhile you can definitely use CURL to interact with Typesense Server directly, we offer official API clients to simplify using Typesense from your language of choice. 
The API Clients come built-in with a smart retry strategy to ensure that API calls made via them are resilient, especially in an HA setup.\n\n- [typesense-js](https://github.com/typesense/typesense-js)\n- [typesense-php](https://github.com/typesense/typesense-php)\n- [typesense-python](https://github.com/typesense/typesense-python)\n- [typesense-ruby](https://github.com/typesense/typesense-ruby)\n\nIf we don't offer an API client in your language, you can still use any popular HTTP client library to access Typesense's APIs directly. \n\nHere are some community-contributed clients and integrations:\n\n- [API client for Go](https://github.com/typesense/typesense-go)\n- [API client for Dart](https://github.com/typesense/typesense-dart)\n- [API client for C#](https://github.com/DAXGRID/typesense-dotnet)\n- [Laravel Scout driver](https://github.com/devloopsnet/laravel-scout-typesense-engine)\n- [Symfony integration](https://github.com/acseo/TypesenseBundle)\n\nWe welcome community contributions to add more official client libraries and integrations. Please reach out to us at contact@typesense.org or open an issue on GitHub to collaborate with us on the architecture. \ud83d\ude4f\n\n## Search UI Components\n\nYou can use our [InstantSearch.js adapter](https://github.com/typesense/typesense-instantsearch-adapter) \nto quickly build powerful search experiences, complete with filtering, sorting, pagination and more.\n\nHere's how: [https://typesense.org/docs/0.24.1/guide/#search-ui](https://typesense.org/docs/0.24.1/guide/#search-ui) \n\n## FAQ\n\n### How does this differ from Elasticsearch?\n\nElasticsearch is a large piece of software that takes a non-trivial amount of effort to set up, administer, scale and fine-tune. \nIt offers you a few thousand configuration parameters to get to your ideal configuration. So it's better suited for large teams \nwho have the bandwidth to get it production-ready, regularly monitor it and scale it, especially when they have a need to store \nbillions of documents and petabytes of data (eg: logs).\n\nTypesense is built specifically for decreasing the \"time to market\" for a delightful search experience. It's a lightweight\nyet powerful & scalable alternative that focuses on Developer Happiness and Experience with a clean, well-documented API, clear semantics \nand smart defaults so it just works well out-of-the-box, without you having to turn many knobs.\n\nElasticsearch also runs on the JVM, which by itself can be quite an effort to tune to run optimally. Typesense, on the other hand, \nis a single lightweight, self-contained native binary, so it's simple to set up and operate.\n\nSee a side-by-side feature comparison [here](https://typesense.org/typesense-vs-algolia-vs-elasticsearch-vs-meilisearch/).\n\n### How does this differ from Algolia?\n\nAlgolia is a proprietary, hosted, search-as-a-service product that works well when cost is not an issue. From our experience,\nfast growing sites and apps quickly run into search & indexing limits, accompanied by expensive plan upgrades as they scale.\n\nTypesense on the other hand is an open-source product that you can run on your own infrastructure or\nuse our managed SaaS offering - [Typesense Cloud](https://cloud.typesense.org). \nThe open source version is free to use (besides of course your own infra costs). \nWith Typesense Cloud we don't charge by records or search operations. Instead, you get a dedicated cluster\nand you can throw as much data and traffic at it as it can handle. 
You only pay a fixed hourly cost & bandwidth charges \nfor it, depending on the configuration you choose, similar to most modern cloud platforms. \n\nFrom a product perspective, Typesense is closer in spirit to Algolia than Elasticsearch. \nHowever, we've addressed some important limitations with Algolia: \n\nAlgolia requires separate indices for each sort order, which counts towards your plan limits. Most of the index settings like \nfields to search, fields to facet, fields to group by, ranking settings, etc \nare defined upfront when the index is created vs being able to set them on the fly at query time.\n\nWith Typesense, these settings can be configured at search time via query parameters which makes it very flexible\nand unlocks new use cases. Typesense is also able to give you sorted results with a single index, vs having to create multiple.\nThis helps reduce memory consumption.\n\nAlgolia offers the following features that Typesense does not have currently: personalization & server-based search analytics. For analytics, you can still instrument your search on the client-side and send search metrics to your web analytics tool of choice. \n\nWe intend to bridge this gap in Typesense, but in the meantime, please let us know\nif any of these are a show stopper for your use case by creating a feature request in our issue tracker. \n\nSee a side-by-side feature comparison [here](https://typesense.org/typesense-vs-algolia-vs-elasticsearch-vs-meilisearch/).\n\n### Speed is great, but what about the memory footprint?\n\nA fresh Typesense server will consume about 30 MB of memory. As you start indexing documents, the memory use will \nincrease correspondingly. How much it increases depends on the number and type of fields you index. \n\nWe've strived to keep the in-memory data structures lean. To give you a rough idea: when 1 million \nHacker News titles are indexed along with their points, Typesense consumes 165 MB of memory. The same size of that data \non disk in JSON format is 88 MB. If you have any numbers from your own datasets that we can add to this section, please send us a PR!\n\n### Why the GPL license?\n\nFrom our experience, companies are generally concerned when **libraries** they use are GPL licensed, since library code is directly integrated into their code and will lead to derivative work and trigger GPL compliance. However, Typesense Server is **server software** and we expect users to typically run it as a separate daemon, and not integrate it with their own code. GPL covers and allows for this use case generously **(eg: Linux is GPL licensed)**. Now, AGPL is what makes server software accessed over a network result in derivative work and not GPL. And for that reason we\u2019ve opted to not use AGPL for Typesense. \n\nNow, if someone makes modifications to Typesense server, GPL actually allows you to still keep the modifications to yourself as long as you don't distribute the modified code. So a company can for example modify Typesense server and run the modified code internally and still not have to open source their modifications, as long as they make the modified code available to everyone who has access to the modified software.\n\nNow, if someone makes modifications to Typesense server and distributes the modifications, that's where GPL kicks in. Given that we\u2019ve published our work to the community, we'd like for others' modifications to also be made open to the community in the spirit of open source. 
**We use GPL for this purpose.** Other licenses would allow our open source work to be modified, made closed source and distributed, which we want to avoid with Typesense for the project\u2019s long term sustainability.\n\nHere's more background on why GPL, as described by Discourse: https://meta.discourse.org/t/why-gnu-license/2531. Many of the points mentioned there resonate with us.\n\nNow, all of the above only apply to Typesense Server. Our client libraries are indeed meant to be integrated into our users\u2019 code and so they use Apache license.\n\nSo in summary, AGPL is what is usually problematic for server software and we\u2019ve opted not to use it. We believe GPL for Typesense Server captures the essence of what we want for this open source project. GPL has a long history of successfully being used by popular open source projects. Our libraries are still Apache licensed.\n\nIf you have specifics that prevent you from using Typesense due to a licensing issue, we're happy to explore this topic further with you. Please reach out to us.\n\n## Support\n\n\ud83d\udc4b \ud83c\udf10 New: If you have general questions about Typesense, want to say hello or just follow along, we'd like to invite you to join our [Slack Community](https://join.slack.com/t/typesense-community/shared_invite/zt-mx4nbsbn-AuOL89O7iBtvkz136egSJg). \n\nWe also do virtual office hours every Friday. Reserve a time slot [here](https://calendly.com/jason-typesense/typesense-office-hours).\n\nIf you run into any problems or issues, please create a Github issue and we'll try our best to help.\n\nWe strive to provide good support through our issue trackers on Github. However, if you'd like to receive private & prioritized support with:\n\n- Guaranteed SLAs\n- Phone / video calls to discuss your specific use case and get recommendations on best practices\n- Private discussions over Slack\n- Guidance around deployment, ops and scaling best practices\n- Prioritized feature requests\n\nWe do offer Paid Support options. Please reach out to us at contact@typesense.org to sign up.\n\n## Contributing\n\nWe are a lean team on a mission to democratize search and we'll take all the help we can get! If you'd like to get involved, here's information on where we could use your help: [Contributing.md](https://github.com/typesense/typesense/blob/master/CONTRIBUTING.md)\n\n## Getting Latest Updates\n\nIf you'd like to get updates when we release new versions, click on the \"Watch\" button on the top and select \"Releases only\". Github will then send you notifications along with a changelog with each new release.\n\nWe also post updates to our Twitter account about releases and additional topics related to Typesense. Follow us here: [@typesense](https://twitter.com/typesense).\n\n\ud83d\udc4b \ud83c\udf10 New: We'll also post updates on our [Slack Community](https://join.slack.com/t/typesense-community/shared_invite/zt-mx4nbsbn-AuOL89O7iBtvkz136egSJg). 
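\n\nAs a small addendum to the API Clients section above, here is a minimal sketch of pointing the Python client from the Quick Start at a multi-node, highly available cluster so that the built-in retry strategy described there can fail over between nodes. The hostnames below are placeholders, and the API key and calls simply mirror the Quick Start example:\n\n```python\nimport typesense\n\n# Placeholder hosts for an illustrative three-node self-hosted cluster.\nclient = typesense.Client({\n    'api_key': 'Hu52dwsas2AdxdE',\n    'nodes': [\n        {'host': 'typesense-1.example.com', 'port': '8108', 'protocol': 'http'},\n        {'host': 'typesense-2.example.com', 'port': '8108', 'protocol': 'http'},\n        {'host': 'typesense-3.example.com', 'port': '8108', 'protocol': 'http'}\n    ],\n    'connection_timeout_seconds': 2\n})\n\n# Same search as in the Quick Start; per the retry behaviour described above,\n# the client can route requests to another node if one becomes unreachable.\nresults = client.collections['companies'].documents.search({\n    'q': 'stork',\n    'query_by': 'company_name'\n})\n```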
\n\n## Build from source\n\n**Building with Docker**\n\nThe docker build script takes care of all required dependencies, so it's the easiest way to build Typesense:\n\n```\nTYPESENSE_VERSION=nightly ./docker-build.sh --build-deploy-image --create-binary [--clean] [--depclean]\n```\n\n**Building on your machine**\n\nTypesense requires the following dependencies: \n\n* C++11 compatible compiler (GCC >= 4.9.0, Apple Clang >= 8.0, Clang >= 3.9.0)\n* Snappy\n* zlib\n* OpenSSL (>=1.0.2)\n* curl\n* ICU\n* brpc\n* braft\n\n```\n./build.sh --create-binary [--clean] [--depclean]\n```\n\nThe first build will take some time since other third-party libraries are pulled and built as part of the build process.\n\n---\n© 2016-present Typesense Inc.\n", - "source_links": [], - "id": 131 - }, - { - "page_link": "https://github.com/Unleash/unleash", - "title": "unleash readme", - "text": "
\n\n\n \"The\n\n\n
\n
\n\n[![Build and Tests](https://img.shields.io/github/actions/workflow/status/Unleash/unleash/build.yaml?branch=main)](https://github.com/Unleash/unleash/actions/workflows/build.yaml) [![Coverage Status](https://coveralls.io/repos/github/Unleash/unleash/badge.svg?branch=main)](https://coveralls.io/github/Unleash/unleash?branch=main) [![Docker Pulls](https://img.shields.io/docker/pulls/unleashorg/unleash-server)](https://hub.docker.com/r/unleashorg/unleash-server) [![Apache-2.0 license](https://img.shields.io/github/license/unleash/unleash)](https://github.com/Unleash/unleash/blob/main/LICENSE) [![Join Unleash on Slack](https://img.shields.io/badge/slack-join-635dc5?logo=slack)](https://slack.unleash.run)\n\n[Open Live Demo \u2192](https://www.getunleash.io/interactive-demo)\n\n
\n\n## About Unleash\n\nUnleash is an open source feature management solution. It improves the workflow of your development team and leads to quicker software delivery. Unleash increases efficiency and gives teams _full control_ of how and when they enable new functionality for end users. Unleash lets teams ship code to production in _smaller_ releases _whenever_ they want.\n\nFeature toggles make it easy to test how your code works with real production data without the fear that you'll accidentally break your users' experience. It also helps your team work on multiple features in parallel without each maintaining a separate feature branch.\n\nUnleash is the largest open source solution for feature flagging on GitHub. There are 12 official client and server SDKs and 10+ community SDKs available; you can even make your own if you want to. You can use Unleash with any language and any framework.\n\n
\n\n## Get started in 2 steps\n\n### 1. Start Unleash\n\nWith [`git`](https://git-scm.com/) and [`docker`](https://www.docker.com/) installed, it's easy to get started:\n\nRun this script:\n\n```bash\ngit clone git@github.com:Unleash/unleash.git\ncd unleash\ndocker compose up -d\n```\n\nThen point your browser to `localhost:4242` and log in using:\n\n- username: `admin`\n- password: `unleash4all`\n\nIf you'd rather run the source code in this repo directly via Node.js, see the [step-by-step instructions to get up and running in the contributing guide](./CONTRIBUTING.md#how-to-run-the-project).\n\n### 2. Connect your SDK\n\nFind your preferred SDK in [our list of official SDKs](#unleash-sdks) and import it into your project. Follow the setup guides for your specific SDK.\n\nIf you use the docker compose file from the previous step, here's the configuration details you'll need to get going:\n\n- For front-end SDKs, use:\n - URL: `http://localhost:4242/api/frontend/`\n - `clientKey`: `default:development.unleash-insecure-frontend-api-token`\n- For server-side SDKs, use:\n - Unleash API URL: `http://localhost:4242/api/`\n - API token: `default:development.unleash-insecure-api-token`\n\nIf you use a different setup, your configuration details will most likely also be different.\n\n### Check a feature toggle\n\nChecking the state of a feature toggle in your code is easy! The syntax will vary depending on your language, but all you need is a simple function call to check whether a toggle is available. Here's how it might look in Java:\n\n```java\nif (unleash.isEnabled(\"AwesomeFeature\")) {\n // do new, flashy thing\n} else {\n // do old, boring stuff\n}\n```\n\n### Run Unleash on a service?\n\nIf you don't want to run Unleash locally, we also provide easy deployment setups for Heroku and Digital Ocean:\n\n[![Deploy to Heroku](./.github/deploy-heroku-20.png)](https://www.heroku.com/deploy/?template=https://github.com/Unleash/unleash) [![Deploy to DigitalOcean](./.github/deploy-digital.svg)](https://cloud.digitalocean.com/apps/new?repo=https://github.com/Unleash/unleash/tree/main&refcode=0e1d75187044)\n\n### Configure and run Unleash anywhere\n\nThe above sections show you how to get up and running quickly and easily. When you're ready to start configuring and customizing Unleash for your own environment, check out the documentation for [getting started with self-managed deployments](https://docs.getunleash.io/reference/deploy/getting-started), [Unleash configuration options](https://docs.getunleash.io/reference/deploy/configuring-unleash), or [running Unleash locally via docker](https://docs.getunleash.io/tutorials/quickstart#i-want-to-run-unleash-locally).\n\n
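For completeness, here is a rough sketch of the same toggle check using the official Python SDK instead of Java, wired up with the server-side URL and API token from the docker compose setup above. It assumes the `UnleashClient` package (`pip install UnleashClient`); consult the SDK's own docs for the full set of constructor options:\n\n```python\nfrom UnleashClient import UnleashClient\n\n# Connection details from the docker compose quick start above.\nclient = UnleashClient(\n    url='http://localhost:4242/api/',\n    app_name='my-python-app',\n    custom_headers={'Authorization': 'default:development.unleash-insecure-api-token'},\n)\nclient.initialize_client()\n\n# Optional context; activation strategies (e.g. gradual rollout) evaluate against it.\nif client.is_enabled('AwesomeFeature', {'userId': 'user_123'}):\n    pass  # do new, flashy thing\nelse:\n    pass  # do old, boring stuff\n\nclient.destroy()\n```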
\n\n## Online demo\n\nTry out [the Unleash online demo](https://www.getunleash.io/interactive-demo).\n\n[![The Unleash online demo](./.github/github_online_demo.svg)](https://www.getunleash.io/interactive-demo)\n\n
\n\n## Community and help \u2014 sharing is caring\n\nWe know that learning a new tool can be hard and time-consuming. We have a growing community that loves to help out. Please don't hesitate to reach out for help.\n\n[![Join Unleash on Slack](https://img.shields.io/badge/slack-join-635dc5?logo=slack)](https://slack.unleash.run)\n\n\ud83d\udcac [Join Unleash on Slack](https://slack.unleash.run) if you want ask open questions about Unleash, feature toggling or discuss these topics in general.\n\n\ud83d\udcbb [Create a GitHub issue](https://github.com/Unleash/unleash/issues/new) if you have found a bug or have ideas on how to improve Unleash.\n\n\ud83d\udcda [Visit the documentation](https://docs.getunleash.io/) for more in-depth descriptions, how-to guides, and more.\n\n
\n\n## Contribute to Unleash\n\nBuilding Unleash is a collaborative effort, and we owe a lot of gratitude to many smart and talented individuals. Building it together with community ensures that we build a product that solves real problems for real people. We'd love to have your help too: Please feel free to open issues or provide pull requests.\n\nCheck out [the CONTRIBUTING.md file](./CONTRIBUTING.md) for contribution guidelines and the [Unleash developer guide](./website/docs/contributing/developer-guide.md) for tips on environment setup, running the tests, and running Unleash from source.\n\n### Contributors\n\n
\n\n[![The Unleash contributors](https://cdn.getunleash.io/docs-assets/contributors.svg)](https://github.com/Unleash/unleash/graphs/contributors)\n\n
\n\n
\n\n## Features our users love\n\n### Flexibility and adaptability\n\n- Get an easy overview of all feature toggles across all your environments, applications and services\n- Use included [activation strategies](https://docs.getunleash.io/reference/activation-strategies) for most common use cases, or use a [custom activation strategy](https://docs.getunleash.io/reference/custom-activation-strategies) to support any need you might have\n- Organise feature toggles by [feature toggle tags](https://docs.getunleash.io/reference/tags)\n- [Canary releases / gradual rollouts](https://docs.getunleash.io/reference/activation-strategies#gradual-rollout)\n- Targeted releases: release features to specific [users](https://docs.getunleash.io/reference/activation-strategies#userids), [IPs](https://docs.getunleash.io/reference/activation-strategies#ips), or [hostnames](https://docs.getunleash.io/reference/activation-strategies#hostnames)\n- [Kill switches](https://docs.getunleash.io/reference/feature-toggle-types#feature-toggle-types)\n- [A/B testing](https://docs.getunleash.io/topics/a-b-testing)\n- 2 [environments](https://docs.getunleash.io/reference/environments)\n- Out-of-the-box integrations with popular tools ([Slack](https://docs.getunleash.io/addons/slack), [Microsoft Teams](https://docs.getunleash.io/addons/teams), [Datadog](https://docs.getunleash.io/addons/datadog)) + integrate with anything with [webhooks](https://docs.getunleash.io/addons/webhook)\n- [Dashboard for managing technical debt](https://docs.getunleash.io/reference/technical-debt) and [stale toggles](https://docs.getunleash.io/reference/technical-debt#stale-and-potentially-stale-toggles)\n- API-first: _everything_ can be automated. No exceptions.\n- [12 official client SDKs](https://docs.getunleash.io/reference/sdks#official-sdks), and ten [community-contributed client SDKs](https://docs.getunleash.io/reference/sdks#community-sdks)\n- Run it via Docker with the [official Docker image](https://hub.docker.com/r/unleashorg/unleash-server) or as a pure Node.js application\n\n### Security and performance\n\n- Privacy by design (GDPR and Schrems II). End-user data never leaves your application.\n- [Audit logs](https://docs.getunleash.io/advanced/audit_log)\n- Enforce [OWASP's secure headers](https://owasp.org/www-project-secure-headers/) via the strict HTTPS-only mode\n- Flexible hosting options: host it on premise or in the cloud (_any_ cloud)\n- Scale [the Unleash Proxy](https://docs.getunleash.io/reference/unleash-proxy) independently of the Unleash server to support any number of front-end clients without overloading your Unleash instance\n\n### Looking for more features?\n\nIf you're looking for one of the following features, please take a look at our [Pro and Enterprise plans](https://www.getunleash.io/plans):\n\n- [role-based access control (RBAC)](https://docs.getunleash.io/reference/rbac)\n- [single sign-on (SSO)](https://docs.getunleash.io/reference/sso)\n- more environments\n- [feature toggles project support](https://docs.getunleash.io/reference/projects)\n- [advanced segmentation](https://docs.getunleash.io/reference/segments)\n- [additional strategy constraints](https://docs.getunleash.io/reference/strategy-constraints)\n- tighter security\n- more hosting options (we can even host it for you!)\n\n
\n\n## Architecture\n\n\n\nRead more in the [_system overview_ section of the Unleash documentation](https://docs.getunleash.io/tutorials/unleash_overview#system-overview).\n\n
\n\n## Unleash SDKs\n\nTo connect your application to Unleash you'll need to use a client SDK for your programming language.\n\n**Official server-side SDKs:**\n\n- [Go SDK](https://docs.getunleash.io/reference/sdks/go)\n- [Java SDK](https://docs.getunleash.io/reference/sdks/java)\n- [Node.js SDK](https://docs.getunleash.io/reference/sdks/node)\n- [PHP SDK](https://docs.getunleash.io/reference/sdks/php)\n- [Python SDK](https://docs.getunleash.io/reference/sdks/python)\n- [Ruby SDK](https://docs.getunleash.io/reference/sdks/ruby)\n- [Rust SDK](https://github.com/unleash/unleash-client-rust)\n- [.NET SDK](https://docs.getunleash.io/reference/sdks/dotnet)\n\n**Official front-end SDKs:**\n\nThe front-end SDKs connects via the [Unleash Proxy](https://docs.getunleash.io/reference/unleash-proxy) in order to ensure privacy, scalability and security.\n\n- [Android Proxy SDK](https://docs.getunleash.io/reference/sdks/android-proxy)\n- [Flutter Proxy SDK](https://docs.getunleash.io/reference/sdks/flutter)\n- [iOS Proxy SDK](https://docs.getunleash.io/reference/sdks/ios-proxy)\n- [JavaScript Proxy SDK](https://docs.getunleash.io/reference/sdks/javascript-browser)\n- [React Proxy SDK](https://docs.getunleash.io/reference/sdks/react)\n- [Svelte Proxy SDK](https://docs.getunleash.io/reference/sdks/svelte)\n- [Vue Proxy SDK](https://docs.getunleash.io/reference/sdks/vue)\n\n**Community SDKs:**\n\nIf none of the official SDKs fit your need, there's also a number of [community-developed SDKs](https://docs.getunleash.io/reference/sdks#community-sdks) where you might find an implementation for your preferred language (such as [Elixir](https://gitlab.com/afontaine/unleash_ex), [Dart](https://pub.dev/packages/unleash), [Clojure](https://github.com/AppsFlyer/unleash-client-clojure), and more).\n\n
\n\n## Users of Unleash\n\n**Unleash is trusted by thousands of companies all over the world**.\n\n**Proud Open-Source users:** (send us a message if you want to add your logo here)\n\n![The Unleash logo encircled by logos for Finn.no, nav (the Norwegian Labour and Welfare Administration), Budgets, Otovo, and Amedia. The encircling logos are all connected to the Unleash logo.](./.github/github_unleash_users.svg)\n\n
\n\n## Migration guides\n\nUnleash has evolved significantly over the past few years, and we know how hard it can be to keep software up to date. If you're using the current major version, upgrading shouldn't be an issue. If you're on a previous major version, check out the [Unleash migration guide](https://docs.getunleash.io/deploy/migration_guide)!\n\n
\n\n## Want to know more about Unleash?\n\n### Videos and podcasts\n\n- [The Unleash YouTube channel](https://www.youtube.com/channel/UCJjGVOc5QBbEje-r7nZEa4A)\n- [_Feature toggles \u2014 Why and how to add to your software_ \u2014 freeCodeCamp (YouTube)](https://www.youtube.com/watch?v=-yHZ9uLVSp4&t=0s)\n- [_Feature flags with Unleash_ \u2014 The Code Kitchen (podcast)](https://share.fireside.fm/episode/zD-4e4KI+Pr379KBv)\n- [_Feature Flags og Unleash med Fredrik Oseberg_ \u2014 Utviklerpodden (podcast; Norwegian)](https://pod.space/utviklerpodden/feature-flags-og-unleash-med-fredrik-oseberg)\n\n### Articles and more\n\n- [The Unleash Blog](https://www.getunleash.io/blog)\n- [_Designing the Rust Unleash API client_ \u2014 Medium](https://medium.com/cognite/designing-the-rust-unleash-api-client-6809c95aa568)\n- [_FeatureToggle_ by Martin Fowler](http://martinfowler.com/bliki/FeatureToggle.html)\n- [_Feature toggling transient errors in load tests_ \u2014 nrkbeta](https://nrkbeta.no/2021/08/23/feature-toggling-transient-errors-in-load-tests/)\n- [_An Interview with Ivar of Unleash_ \u2014 Console](https://console.substack.com/p/console-42)\n- [_Unleash your features gradually_](http://ivarconr.github.io/feature-toggles-presentation/sch-dev-lunch-2017/#1 ' '), slideshow/presentation by Ivar, the creator of Unleash\n", - "source_links": [], - "id": 132 - }, - { - "page_link": null, - "title": "valheim readme", - "text": null, - "source_links": [], - "id": 133 - }, - { - "page_link": null, - "title": "vault readme", - "text": null, - "source_links": [], - "id": 134 - }, - { - "page_link": "https://github.com/dani-garcia/vaultwarden", - "title": "vaultwarden readme", - "text": "### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.\n\n\ud83d\udce2 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.\n\n---\n\n[![Docker Pulls](https://img.shields.io/docker/pulls/vaultwarden/server.svg)](https://hub.docker.com/r/vaultwarden/server)\n[![Dependency Status](https://deps.rs/repo/github/dani-garcia/vaultwarden/status.svg)](https://deps.rs/repo/github/dani-garcia/vaultwarden)\n[![GitHub Release](https://img.shields.io/github/release/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/releases/latest)\n[![GPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)\n[![Matrix Chat](https://img.shields.io/matrix/vaultwarden:matrix.org.svg?logo=matrix)](https://matrix.to/#/#vaultwarden:matrix.org)\n\nImage is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).\n\n**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**\n\n#### \u26a0\ufe0f**IMPORTANT**\u26a0\ufe0f: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). 
DO NOT use the official support channels.\n\n---\n\n## Features\n\nBasically full implementation of Bitwarden API is provided including:\n\n * Organizations support\n * Attachments\n * Vault API support\n * Serving the static files for Vault interface\n * Website icons API\n * Authenticator and U2F support\n * YubiKey and Duo support\n\n## Installation\nPull the docker image and mount a volume from the host for persistent storage:\n\n```sh\ndocker pull vaultwarden/server:latest\ndocker run -d --name vaultwarden -v /vw-data/:/data/ -p 80:80 vaultwarden/server:latest\n```\nThis will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.\n\n**IMPORTANT**: Some web browsers, like Chrome, disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault from HTTPS. \n\nThis can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).\n\nIf you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).\n\n## Usage\nSee the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.\n\n## Get in touch\nTo ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://vaultwarden.discourse.group/).\n\nIf you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure there aren't any similar issues open, though!\n\nIf you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!\n\n### Sponsors\nThanks for your contribution to the project!\n\n\n\n\n \n \n \n \n \n \n
\n \n Chris Alfano\n \n
\n \n Numberly\n \n
\n", - "source_links": [], - "id": 135 - }, - { - "page_link": "https://github.com/weaviate/weaviate", - "title": "weaviate readme", - "text": "

Weaviate Weaviate logo

\n\n[![Build Status](https://github.com/weaviate/weaviate/actions/workflows/.github/workflows/pull_requests.yaml/badge.svg?branch=master)](https://github.com/weaviate/weaviate/actions/workflows/.github/workflows/pull_requests.yaml)\n[![Go Report Card](https://goreportcard.com/badge/github.com/weaviate/weaviate)](https://goreportcard.com/report/github.com/weaviate/weaviate)\n[![Coverage Status](https://codecov.io/gh/weaviate/weaviate/branch/master/graph/badge.svg)](https://codecov.io/gh/weaviate/weaviate)\n[![Slack](https://img.shields.io/badge/slack--channel-blue?logo=slack)](https://weaviate.io/slack)\n\n## Overview\n\nWeaviate is an **open source \u200bvector database** that is robust, scalable, cloud-native, and fast.\n\nIf you just want to get started, great! Try:\n- the [quickstart tutorial](https://weaviate.io/developers/weaviate/quickstart) if you are looking to use Weaviate, or\n- the [contributor guide](https://weaviate.io/developers/contributor-guide) if you are looking to contribute to the project.\n\nAnd you can find our [documentation here](https://weaviate.io/developers/weaviate/).\n\nIf you have a bit more time, stick around and check out our summary below \ud83d\ude09\n\n-----\n\n## Why Weaviate?\n\nWith Weaviate, you can turn your text, images and more into a searchable vector database using state-of-the-art ML models.\n\nSome of its highlights are:\n\n### Speed\n\nWeaviate typically performs a 10-NN neighbor search out of millions of objects in single-digit milliseconds. See [benchmarks](https://weaviate.io/developers/weaviate/benchmarks).\n\n### Flexibility\n\nYou can use Weaviate to conveniently **vectorize your data at import time**, or alternatively you can **upload your own vectors**.\n\nThese vectorization options are enabled by Weaviate modules. Modules enable use of popular services and model hubs such as [OpenAI](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-openai), [Cohere](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-cohere) or [HuggingFace](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-huggingface) and much more, including use of local and custom models.\n\n### Production-readiness\n\nWeaviate is designed to take you from **rapid prototyping** all the way to **production at scale**.\n\nTo this end, Weaviate is built with [scaling](https://weaviate.io/developers/weaviate/concepts/cluster), [replication](https://weaviate.io/developers/weaviate/concepts/replication-architecture), and [security](https://weaviate.io/developers/weaviate/configuration/authentication) in mind, among others.\n\n### Beyond search\n\nWeaviate powers lightning-fast vector searches, but it is capable of much more. 
Some of its other superpowers include **recommendation**, **summarization**, and **integrations with neural search frameworks**.\n\n## What can you build with Weaviate?\n\nFor starters, you can build vector databases with text, images, or a combination of both.\n\nYou can also build question and answer extraction, summarization and classification systems.\n\nYou can find [code examples here](https://github.com/weaviate/weaviate-examples), and you might find blog posts like these useful:\n\n- [How to build an Image Search Application with Weaviate](https://weaviate.io/blog/how-to-build-an-image-search-application-with-weaviate)\n- [Cohere Multilingual ML Models with Weaviate](https://weaviate.io/blog/cohere-multilingual-with-weaviate)\n- [The Sphere Dataset in Weaviate](https://weaviate.io/blog/sphere-dataset-in-weaviate)\n\n## Weaviate content\n\nSpeaking of content - we love connecting with our community through these. We love helping amazing people build cool things with Weaviate, and we love getting to know them as well as talking to them about their passions.\n\nTo this end, our team does an amazing job with our [blog](https://weaviate.io/blog) and [podcast](https://weaviate.io/podcast).\n\nSome of our past favorites include:\n\n### \ud83d\udcdd Blogs\n\n- [Why is vector search so fast?](https://weaviate.io/blog/Why-is-Vector-Search-so-fast)\n- [Cohere Multilingual ML Models with Weaviate](https://weaviate.io/blog/Cohere-multilingual-with-weaviate)\n- [Vamana vs. HNSW - Exploring ANN algorithms Part 1](https://weaviate.io/blog/ann-algorithms-vamana-vs-hnsw)\n\n### \ud83c\udf99\ufe0f Podcasts\n\n- [Neural Magic in Weaviate](https://www.youtube.com/watch?v=leGgjIQkVYo)\n- [BERTopic](https://www.youtube.com/watch?v=IwXOaHanfUU)\n- [Jina AI's Neural Search Framework](https://www.youtube.com/watch?v=o6MD0tWl0SM)\n\nBoth our [\ud83d\udcdd blogs](https://weaviate.io/blog) and [\ud83c\udf99\ufe0f podcasts](https://weaviate.io/podcast) are updated regularly. To keep up to date with all things Weaviate including new software releases, meetup news and of course all of the content, you can subscribe to our [\ud83d\uddde\ufe0f newsletter](https://newsletter.weaviate.io/).\n\n## Join our community!\n\nAlso, we invite you to join our [Slack](https://weaviate.io/slack) community. There, you can meet other Weaviate users and members of the Weaviate team to talk all things Weaviate and AI (and other topics!).\n\nYou can also say hi to us below:\n- [Twitter](https://twitter.com/weaviate_io)\n- [LinkedIn](https://www.linkedin.com/company/weaviate-io)\n\nOr connect to us via:\n- [Stack Overflow for questions](https://stackoverflow.com/questions/tagged/weaviate)\n- [GitHub for issues](https://github.com/weaviate/weaviate/issues)\n\n-----\n\n## Weaviate helps ...\n\n1. **Software Engineers** ([docs](https://weaviate.io/developers/weaviate/current/)) - Who use Weaviate as an ML-first database for your applications.\n * Out-of-the-box modules for: NLP/semantic search, automatic classification and image similarity search.\n * Easy to integrate into your current architecture, with full CRUD support like you're used to from other OSS databases.\n * Cloud-native, distributed, runs well on Kubernetes and scales with your workloads.\n\n2. 
**Data Engineers** ([docs](https://weaviate.io/developers/weaviate/current/)) - Who use Weaviate as a vector database that is built up from the ground with ANN at its core, and with the same UX they love from Lucene-based search engines.\n * Weaviate has a modular setup that allows you to use your ML models inside Weaviate, but you can also use out-of-the-box ML models (e.g., SBERT, ResNet, fasttext, etc).\n * Weaviate takes care of the scalability, so that you don't have to.\n * Deploy and maintain ML models in production reliably and efficiently.\n\n3. **Data Scientists** ([docs](https://weaviate.io/developers/weaviate/current/)) - Who use Weaviate for a seamless handover of their Machine Learning models to MLOps.\n * Deploy and maintain your ML models in production reliably and efficiently.\n * Weaviate's modular design allows you to easily package any custom trained model you want.\n * Smooth and accelerated handover of your Machine Learning models to engineers.\n\n## Interfaces\n\nYou can use Weaviate with any of these clients:\n\n- [Python](https://weaviate.io/developers/weaviate/client-libraries/python)\n- [Javascript](https://weaviate.io/developers/weaviate/client-libraries/javascript)\n- [Go](https://weaviate.io/developers/weaviate/client-libraries/go)\n- [Java](https://weaviate.io/developers/weaviate/client-libraries/java)\n\nYou can also use its GraphQL API to retrieve objects and properties.\n\n### GraphQL interface demo\n\n\"Demo\n\n\n## Additional material\n\n### Reading\n\n- [Weaviate is an open-source search engine powered by ML, vectors, graphs, and GraphQL (ZDNet)](https://www.zdnet.com/article/weaviate-an-open-source-search-engine-powered-by-machine-learning-vectors-graphs-and-graphql/)\n- [Weaviate, an ANN Database with CRUD support (DB-Engines.com)](https://db-engines.com/en/blog_post/87)\n- [A sub-50ms neural search with DistilBERT and Weaviate (Towards Datascience)](https://towardsdatascience.com/a-sub-50ms-neural-search-with-distilbert-and-weaviate-4857ae390154)\n- [Getting Started with Weaviate Python Library (Towards Datascience)](https://towardsdatascience.com/getting-started-with-weaviate-python-client-e85d14f19e4f)\n", - "source_links": [], - "id": 136 - }, - { - "page_link": "https://github.com/WireGuard/wireguard-go", - "title": "wireguard readme", - "text": "# Go Implementation of [WireGuard](https://www.wireguard.com/)\n\nThis is an implementation of WireGuard in Go.\n\n## Usage\n\nMost Linux kernel WireGuard users are used to adding an interface with `ip link add wg0 type wireguard`. With wireguard-go, instead simply run:\n\n```\n$ wireguard-go wg0\n```\n\nThis will create an interface and fork into the background. To remove the interface, use the usual `ip link del wg0`, or if your system does not support removing interfaces directly, you may instead remove the control socket via `rm -f /var/run/wireguard/wg0.sock`, which will result in wireguard-go shutting down.\n\nTo run wireguard-go without forking to the background, pass `-f` or `--foreground`:\n\n```\n$ wireguard-go -f wg0\n```\n\nWhen an interface is running, you may use [`wg(8)`](https://git.zx2c4.com/wireguard-tools/about/src/man/wg.8) to configure it, as well as the usual `ip(8)` and `ifconfig(8)` commands.\n\nTo run with more logging you may set the environment variable `LOG_LEVEL=debug`.\n\n## Platforms\n\n### Linux\n\nThis will run on Linux; however you should instead use the kernel module, which is faster and better integrated into the OS. 
See the [installation page](https://www.wireguard.com/install/) for instructions.\n\n### macOS\n\nThis runs on macOS using the utun driver. It does not yet support sticky sockets, and won't support fwmarks because of Darwin limitations. Since the utun driver cannot have arbitrary interface names, you must either use `utun[0-9]+` for an explicit interface name or `utun` to have the kernel select one for you. If you choose `utun` as the interface name, and the environment variable `WG_TUN_NAME_FILE` is defined, then the actual name of the interface chosen by the kernel is written to the file specified by that variable.\n\n### Windows\n\nThis runs on Windows, but you should instead use it from the more [fully featured Windows app](https://git.zx2c4.com/wireguard-windows/about/), which uses this as a module.\n\n### FreeBSD\n\nThis will run on FreeBSD. It does not yet support sticky sockets. Fwmark is mapped to `SO_USER_COOKIE`.\n\n### OpenBSD\n\nThis will run on OpenBSD. It does not yet support sticky sockets. Fwmark is mapped to `SO_RTABLE`. Since the tun driver cannot have arbitrary interface names, you must either use `tun[0-9]+` for an explicit interface name or `tun` to have the program select one for you. If you choose `tun` as the interface name, and the environment variable `WG_TUN_NAME_FILE` is defined, then the actual name of the interface chosen by the kernel is written to the file specified by that variable.\n\n## Building\n\nThis requires an installation of [go](https://golang.org) \u2265 1.18.\n\n```\n$ git clone https://git.zx2c4.com/wireguard-go\n$ cd wireguard-go\n$ make\n```\n\n## License\n\n Copyright (C) 2017-2022 WireGuard LLC. All Rights Reserved.\n \n Permission is hereby granted, free of charge, to any person obtaining a copy of\n this software and associated documentation files (the \"Software\"), to deal in\n the Software without restriction, including without limitation the rights to\n use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n of the Software, and to permit persons to whom the Software is furnished to do\n so, subject to the following conditions:\n \n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n \n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n", - "source_links": [], - "id": 137 - }, - { - "page_link": "https://github.com/bentoml/Yatai", - "title": "yatai readme", - "text": "# \ud83e\udd84\ufe0f Yatai: Model Deployment at Scale on Kubernetes\n\n[![actions_status](https://github.com/bentoml/yatai/workflows/CICD/badge.svg)](https://github.com/bentoml/yatai/actions)\n[![docs](https://readthedocs.org/projects/yatai/badge/?version=latest&style=flat-square)](https://docs.bentoml.org/projects/yatai)\n[![join_slack](https://badgen.net/badge/Join/Community%20Slack/cyan?icon=slack&style=flat-square)](https://join.slack.bentoml.org)\n\nYatai (\u5c4b\u53f0, food cart) lets you deploy, operate and scale Machine Learning services on Kubernetes. 
\n\nIt supports deploying any ML models via [BentoML: the unified model serving framework](https://github.com/bentoml/bentoml).\n\n\"yatai-overview-page\"\n\n\n\ud83d\udc49 [Join our Slack community today!](https://l.bentoml.com/join-slack)\n\n\u2728 Looking for the fastest way to give Yatai a try? Check out [BentoML Cloud](https://l.bentoml.com/bento-cloud) to get started today.\n\n\n---\n\n## Why Yatai?\n\n\ud83c\udf71 Made for BentoML, deploy at scale\n\n- Scale [BentoML](https://github.com/bentoml) to its full potential on a distributed system, optimized for cost saving and performance.\n- Manage deployment lifecycle to deploy, update, or rollback via API or Web UI.\n- Centralized registry providing the **foundation for CI/CD** via artifact management APIs, labeling, and WebHooks for custom integration.\n\n\ud83d\ude85 Cloud native & DevOps friendly\n\n- Kubernetes-native workflow via [BentoDeployment CRD](https://docs.bentoml.org/projects/yatai/en/latest/concepts/bentodeployment_crd.html) (Custom Resource Definition), which can easily fit into an existing GitOps workflow.\n- Native [integration with Grafana](https://docs.bentoml.org/projects/yatai/en/latest/observability/metrics.html) stack for observability.\n- Support for traffic control with Istio.\n- Compatible with all major cloud platforms (AWS, Azure, and GCP).\n\n\n## Getting Started\n\n- \ud83d\udcd6 [Documentation](https://docs.bentoml.org/projects/yatai/) - Overview of the Yatai docs and related resources\n- \u2699\ufe0f [Installation](https://docs.bentoml.org/projects/yatai/en/latest/installation/index.html) - Hands-on instruction on how to install Yatai for production use\n- \ud83d\udc49 [Join Community Slack](https://l.linklyhq.com/l/ktPW) - Get help from our community and maintainers\n\n\n## Quick Tour\n\nLet's try out Yatai locally in a minikube cluster!\n\n### \u2699\ufe0f Prerequisites:\n * Install latest minikube: https://minikube.sigs.k8s.io/docs/start/\n * Install latest Helm: https://helm.sh/docs/intro/install/\n * Start a minikube Kubernetes cluster: `minikube start --cpus 4 --memory 4096`, if you are using macOS, you should use [hyperkit](https://minikube.sigs.k8s.io/docs/drivers/hyperkit/) driver to prevent the macOS docker desktop [network limitation](https://docs.docker.com/desktop/networking/#i-cannot-ping-my-containers)\n * Check that minikube cluster status is \"running\": `minikube status`\n * Make sure your `kubectl` is configured with `minikube` context: `kubectl config current-context`\n * Enable ingress controller: `minikube addons enable ingress`\n\n### \ud83d\udea7 Install Yatai\n\nInstall Yatai with the following script:\n\n```bash\nbash <(curl -s \"https://raw.githubusercontent.com/bentoml/yatai/main/scripts/quick-install-yatai.sh\")\n```\n\nThis script will install Yatai along with its dependencies (PostgreSQL and MinIO) on\nyour minikube cluster. 
\n\nNote that this installation script is made for development and testing use only.\nFor production deployment, check out the [Installation Guide](https://docs.bentoml.org/projects/yatai/en/latest/installation/index.html).\n\nTo access Yatai web UI, run the following command and keep the terminal open:\n\n```bash\nkubectl --namespace yatai-system port-forward svc/yatai 8080:80\n```\n\nIn a separate terminal, run:\n\n```bash\nYATAI_INITIALIZATION_TOKEN=$(kubectl get secret yatai-env --namespace yatai-system -o jsonpath=\"{.data.YATAI_INITIALIZATION_TOKEN}\" | base64 --decode)\necho \"Open in browser: http://127.0.0.1:8080/setup?token=$YATAI_INITIALIZATION_TOKEN\"\n``` \n\nOpen the URL printed above from your browser to finish admin account setup.\n\n\n### \ud83c\udf71 Push Bento to Yatai\n\nFirst, get an API token and login to the BentoML CLI:\n\n* Keep the `kubectl port-forward` command in the step above running\n* Go to Yatai's API tokens page: http://127.0.0.1:8080/api_tokens\n* Create a new API token from the UI, making sure to assign \"API\" access under \"Scopes\"\n* Copy the login command upon token creation and run as a shell command, e.g.:\n\n ```bash\n bentoml yatai login --api-token {YOUR_TOKEN} --endpoint http://127.0.0.1:8080\n ```\n\nIf you don't already have a Bento built, run the following commands from the [BentoML Quickstart Project](https://github.com/bentoml/BentoML/tree/main/examples/quickstart) to build a sample Bento:\n\n```bash\ngit clone https://github.com/bentoml/bentoml.git && cd ./examples/quickstart\npip install -r ./requirements.txt\npython train.py\nbentoml build\n```\n\nPush your newly built Bento to Yatai:\n\n```bash\nbentoml push iris_classifier:latest\n```\n\nNow you can view and manage models and bentos from the web UI:\n\n\"yatai-bento-repos\"\n\"yatai-model-detail\"\n\n### \ud83d\udd27 Install yatai-image-builder component\n\nYatai's image builder feature comes as a separate component, you can install it via the following\nscript:\n\n```bash\nbash <(curl -s \"https://raw.githubusercontent.com/bentoml/yatai-image-builder/main/scripts/quick-install-yatai-image-builder.sh\")\n```\n\nThis will install the `BentoRequest` CRD([Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)) and `Bento` CRD\nin your cluster. Similarly, this script is made for development and testing purposes only.\n\n### \ud83d\udd27 Install yatai-deployment component\n\nYatai's Deployment feature comes as a separate component, you can install it via the following\nscript:\n\n```bash\nbash <(curl -s \"https://raw.githubusercontent.com/bentoml/yatai-deployment/main/scripts/quick-install-yatai-deployment.sh\")\n```\n\nThis will install the `BentoDeployment` CRD([Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/))\nin your cluster and enable the deployment UI on Yatai. Similarly, this script is made for development and testing purposes only.\n\n### \ud83d\udea2 Deploy Bento!\n\nOnce the `yatai-deployment` component was installed, Bentos pushed to Yatai can be deployed to your \nKubernetes cluster and exposed via a Service endpoint. \n\nA Bento Deployment can be created either via Web UI or via a Kubernetes CRD config:\n\n#### Option 1. Simple Deployment via Web UI\n\n* Go to the deployments page: http://127.0.0.1:8080/deployments\n* Click `Create` button and follow the instructions on the UI\n\n\"yatai-deployment-creation\"\n\n#### Option 2. 
Deploy with kubectl & CRD\n\nDefine your Bento deployment in a `my_deployment.yaml` file:\n\n```yaml\napiVersion: resources.yatai.ai/v1alpha1\nkind: BentoRequest\nmetadata:\n name: iris-classifier\n namespace: yatai\nspec:\n bentoTag: iris_classifier:3oevmqfvnkvwvuqj\n---\napiVersion: serving.yatai.ai/v2alpha1\nkind: BentoDeployment\nmetadata:\n name: my-bento-deployment\n namespace: yatai\nspec:\n bento: iris-classifier\n ingress:\n enabled: true\n resources:\n limits:\n cpu: \"500m\"\n memory: \"512m\"\n requests:\n cpu: \"250m\"\n memory: \"128m\"\n autoscaling:\n maxReplicas: 10\n minReplicas: 2\n runners:\n - name: iris_clf\n resources:\n limits:\n cpu: \"1000m\"\n memory: \"1Gi\"\n requests:\n cpu: \"500m\"\n memory: \"512m\"\n autoscaling:\n maxReplicas: 4\n minReplicas: 1\n```\n\nApply the deployment to your minikube cluster:\n```bash\nkubectl apply -f my_deployment.yaml\n```\n\nNow you can see the deployment process from the Yatai Web UI and find the endpoint URL for accessing\nthe deployed Bento.\n\n\"yatai-deployment-details\"\n\n\n\n\n## Community\n\n- To report a bug or suggest a feature request, use [GitHub Issues](https://github.com/bentoml/yatai/issues/new/choose).\n- For other discussions, use [GitHub Discussions](https://github.com/bentoml/BentoML/discussions) under the [BentoML repo](https://github.com/bentoml/BentoML/)\n- To receive release announcements and get support, join us on [Slack](https://join.slack.bentoml.org).\n\n## Contributing\n\nThere are many ways to contribute to the project:\n\n- If you have any feedback on the project, share it with the community in [GitHub Discussions](https://github.com/bentoml/BentoML/discussions) under the [BentoML repo](https://github.com/bentoml/BentoML/).\n- Report issues you're facing and \"Thumbs up\" on issues and feature requests that are relevant to you.\n- Investigate bugs and review other developers' pull requests.\n- Contributing code or documentation to the project by submitting a GitHub pull request. See the [development guide](https://github.com/bentoml/yatai/blob/main/DEVELOPMENT.md).\n\n### Usage Reporting\n\nYatai collects usage data that helps our team to improve the product.\nOnly Yatai's internal API calls are being reported. 
We strip out as much potentially\nsensitive information as possible, and we will never collect user code, model data, model names, or stack traces.\nHere's the [code](./api-server/services/tracking/) for usage tracking.\nYou can opt-out of usage by configuring the helm chart option `doNotTrack` to\n`true`.\n\n```yaml\ndoNotTrack: false\n```\n\nOr by setting the `YATAI_DONOT_TRACK` env var in yatai deployment.\n```yaml\nspec:\n template:\n spec:\n containers:\n env:\n - name: YATAI_DONOT_TRACK\n value: \"true\"\n```\n\n\n## Licence\n\n[Elastic License 2.0 (ELv2)](https://github.com/bentoml/yatai/blob/main/LICENSE.md)\n", - "source_links": [], - "id": 138 - }, - { - "page_link": "https://github.com/yugabyte/yugabyte-db", - "title": "yugabyte readme", - "text": "\"YugabyteDB\"\n\n---------------------------------------\n\n[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\n[![Documentation Status](https://readthedocs.org/projects/ansicolortags/badge/?version=latest)](https://docs.yugabyte.com/)\n[![Ask in forum](https://img.shields.io/badge/ask%20us-forum-orange.svg)](https://forum.yugabyte.com/)\n[![Slack chat](https://img.shields.io/badge/Slack:-%23yugabyte_db-blueviolet.svg?logo=slack)](https://communityinviter.com/apps/yugabyte-db/register)\n[![Analytics](https://yugabyte.appspot.com/UA-104956980-4/home?pixel&useReferer)](https://github.com/yugabyte/ga-beacon)\n\n# What is YugabyteDB? \n\n**YugabyteDB** is a **high-performance, cloud-native, distributed SQL database** that aims to support **all PostgreSQL features**. It is best suited for **cloud-native OLTP (i.e., real-time, business-critical) applications** that need absolute **data correctness** and require at least one of the following: **scalability, high tolerance to failures, or globally-distributed deployments.**\n\n* [Core Features](#core-features)\n* [Get Started](#get-started)\n* [Build Apps](#build-apps)\n* [What's being worked on?](#whats-being-worked-on)\n* [Architecture](#architecture)\n* [Need Help?](#need-help)\n* [Contribute](#contribute)\n* [License](#license)\n* [Read More](#read-more)\n\n# Core Features\n\n* **Powerful RDBMS capabilities** Yugabyte SQL (*YSQL* for short) reuses the query layer of PostgreSQL (similar to Amazon Aurora PostgreSQL), thereby supporting most of its features (datatypes, queries, expressions, operators and functions, stored procedures, triggers, extensions, etc). Here is a detailed [list of features currently supported by YSQL](https://docs.yugabyte.com/preview/explore/ysql-language-features/postgresql-compatibility/).\n\n* **Distributed transactions** The transaction design is based on the Google Spanner architecture. Strong consistency of writes is achieved by using Raft consensus for replication and cluster-wide distributed ACID transactions using *hybrid logical clocks*. *Snapshot*, *serializable* and *read committed* isolation levels are supported. Reads (queries) have strong consistency by default, but can be tuned dynamically to read from followers and read-replicas.\n\n* **Continuous availability** YugabyteDB is extremely resilient to common outages with native failover and repair. YugabyteDB can be configured to tolerate disk, node, zone, region, and cloud failures automatically. 
For a typical deployment where a YugabyteDB cluster is deployed in one region across multiple zones on a public cloud, the RPO is 0 (meaning no data is lost on failure) and the RTO is 3 seconds (meaning the data being served by the failed node is available in 3 seconds).\n\n* **Horizontal scalability** Scaling a YugabyteDB cluster to achieve more IOPS or data storage is as simple as adding nodes to the cluster.\n\n* **Geo-distributed, multi-cloud** YugabyteDB can be deployed in public clouds and natively inside Kubernetes. It supports deployments that span three or more fault domains, such as multi-zone, multi-region, and multi-cloud deployments. It also supports xCluster asynchronous replication with unidirectional master-slave and bidirectional multi-master configurations that can be leveraged in two-region deployments. To serve (stale) data with low latencies, read replicas are also a supported feature.\n\n* **Multi API design** The query layer of YugabyteDB is built to be extensible. Currently, YugabyteDB supports two distributed SQL APIs: **[Yugabyte SQL (YSQL)](https://docs.yugabyte.com/preview/api/ysql/)**, a fully relational API that re-uses query layer of PostgreSQL, and **[Yugabyte Cloud QL (YCQL)](https://docs.yugabyte.com/preview/api/ycql/)**, a semi-relational SQL-like API with documents/indexing support with Apache Cassandra QL roots.\n\n* **100% open source** YugabyteDB is fully open-source under the [Apache 2.0 license](https://github.com/yugabyte/yugabyte-db/blob/master/LICENSE.md). The open-source version has powerful enterprise features such as distributed backups, encryption of data-at-rest, in-flight TLS encryption, change data capture, read replicas, and more.\n\nRead more about YugabyteDB in our [FAQ](https://docs.yugabyte.com/preview/faq/general/).\n\n# Get Started\n\n* [Quick Start](https://docs.yugabyte.com/preview/quick-start/)\n* Try running a real-world demo application:\n * [Microservices-oriented e-commerce app](https://github.com/yugabyte/yugastore-java)\n * [Streaming IoT app with Kafka and Spark Streaming](https://docs.yugabyte.com/preview/develop/realworld-apps/iot-spark-kafka-ksql/)\n\nCannot find what you are looking for? Have a question? Please post your questions or comments on our Community [Slack](https://communityinviter.com/apps/yugabyte-db/register) or [Forum](https://forum.yugabyte.com).\n\n# Build Apps\n\nYugabyteDB supports many languages and client drivers, including Java, Go, NodeJS, Python, and more. 
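\n\nAs an illustrative sketch of the PostgreSQL compatibility described under Core Features, a plain PostgreSQL driver such as `psycopg2` can talk to a local YSQL endpoint. This assumes a quick-start cluster with the defaults of port 5433 and the `yugabyte` user and database; adjust host and credentials to your own setup:\n\n```python\nimport psycopg2\n\n# YSQL speaks the PostgreSQL wire protocol, so a stock PostgreSQL driver works unchanged.\nconn = psycopg2.connect(host='127.0.0.1', port=5433, user='yugabyte', dbname='yugabyte')\nconn.autocommit = True\ncur = conn.cursor()\n\ncur.execute('CREATE TABLE IF NOT EXISTS users (id serial PRIMARY KEY, name text)')\ncur.execute('INSERT INTO users (name) VALUES (%s)', ('Alice',))\ncur.execute('SELECT id, name FROM users')\nprint(cur.fetchall())\n\ncur.close()\nconn.close()\n```\n\n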
For a complete list, including examples, see [Drivers and ORMs](https://docs.yugabyte.com/preview/drivers-orms/).\n\n# What's being worked on?\n\n> This section was last updated in **March, 2022**.\n\n## Current roadmap\n\nHere is a list of some of the key features being worked on for the upcoming releases (the YugabyteDB [**v2.13 preview release**](https://docs.yugabyte.com/preview/releases/release-notes/v2.13/) has been released in **March, 2022**, and the [**v2.12 stable release**](https://blog.yugabyte.com/announcing-yugabytedb-2-12/) was released in **Feb 2022**).\n\n| Feature | Status | Release Target | Progress | Comments |\n| ----------------------------------------------- | --------- | -------------- | --------------- | ------------- |\n|[Faster Bulk-Data Loading in YugabyteDB](https://github.com/yugabyte/yugabyte-db/issues/11765)| PROGRESS| v2.15 |[Track](https://github.com/yugabyte/yugabyte-db/issues/11765)| Master issue to track improvements to make it easier and faster to get large amounts of data into YugabyteDB.\n|[Database-level multi-tenancy with tablegroups](https://github.com/yugabyte/yugabyte-db/issues/11665)| PROGRESS| v2.15 |[Track](https://github.com/yugabyte/yugabyte-db/issues/11665)| Master issue to track Database-level multi-tenancy using tablegroups.\n|[Upgrade to PostgreSQL v13](https://github.com/yugabyte/yugabyte-db/issues/9797)| PROGRESS| v2.15 |[Track](https://github.com/yugabyte/yugabyte-db/issues/9797)| For latest features, new PostgreSQL extensions, performance, and community fixes\n|Support for [in-cluster PITR](https://github.com/yugabyte/yugabyte-db/issues/7120) | PROGRESS| v2.15 |[Track](https://github.com/yugabyte/yugabyte-db/issues/7120)|Point in time recovery of YSQL databases, to a fixed point in time, across DDL and DML changes|\n| [Automatic tablet splitting enabled by default](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/docdb-automatic-tablet-splitting.md) | PROGRESS | v2.15 | [Track](https://github.com/yugabyte/yugabyte-db/issues/1004) |Enables changing the number of tablets (which are splits of data) at runtime.|\n| YSQL-table statistics and cost based optimizer(CBO) | PROGRESS | v2.15 | [Track](https://github.com/yugabyte/yugabyte-db/issues/5242) | Improve YSQL query performance |\n| [YSQL-Feature support - ALTER TABLE](https://github.com/yugabyte/yugabyte-db/issues/1124) | PROGRESS | v2.15 | [Track](https://github.com/yugabyte/yugabyte-db/issues/1124) | Support for various `ALTER TABLE` variants |\n| [YSQL-Online schema migration](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/online-schema-migrations.md) | PROGRESS | v2.15 | [Track](https://github.com/yugabyte/yugabyte-db/issues/4192) | Schema migrations(includes DDL operations) to be safely run concurrently with foreground operations |\n| Pessimistic locking Design | PROGRESS | v2.15 | [Track](https://github.com/yugabyte/yugabyte-db/issues/5680) | |\n| Make [`COLOCATED` tables](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/ysql-colocated-tables.md) default for YSQL | PLANNING | | [Track](https://github.com/yugabyte/yugabyte-db/issues/5239) | |\n| Support for transactions in async [xCluster replication](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/multi-region-xcluster-async-replication.md) | PLANNING | | [Track](https://github.com/yugabyte/yugabyte-db/issues/1808) | Apply transactions atomically on consumer cluster. 
|\n| Support for GiST indexes | PLANNING | | [Track](https://github.com/yugabyte/yugabyte-db/issues/1337) |Support for GiST (Generalized Search Tree) based indexes|\n\n## Recently released features\n\n| Feature | Status | Release Target | Docs / Enhancements | Comments |\n| ----------------------------------------------- | --------- | -------------- | ------------------- | ------------- |\n|[Change Data Capture](https://github.com/yugabyte/yugabyte-db/issues/9019)| \u2705 *DONE*| v2.13 ||Change data capture (CDC) allows multiple downstream apps and services to consume the continuous and never-ending stream(s) of changes to Yugabyte databases|\n|[Support for materialized views](https://github.com/yugabyte/yugabyte-db/issues/10102) | \u2705 *DONE*| v2.13 |[Docs](https://docs.yugabyte.com/preview/explore/ysql-language-features/advanced-features/views/#materialized-views)|A materialized view is a pre-computed data set derived from a query specification and stored for later use|\n|[Geo-partitioning support](https://github.com/yugabyte/yugabyte-db/issues/9980) for the transaction status table | \u2705 *DONE*| v2.13 |[Docs](https://docs.yugabyte.com/preview/explore/multi-region-deployments/row-level-geo-partitioning/)|Instead of keeping transaction execution metadata in a central remote location, it is now optimized for access from different regions. Since the transaction metadata is also geo-partitioned, this eliminates the need for a round trip to remote regions to update transaction statuses.|\n| Transparently restart transactions | \u2705 *DONE*| v2.13 | |Decreases the incidence of transaction restart errors seen in various scenarios |\n| [Row-level geo-partitioning](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/ysql-row-level-partitioning.md) | \u2705 *DONE*| v2.13 |[Docs](https://docs.yugabyte.com/preview/explore/multi-region-deployments/row-level-geo-partitioning/)|Row-level geo-partitioning allows fine-grained control over pinning data in a user table (at a per-row level) to geographic locations, thereby allowing the data residency to be managed at the table-row level.|\n| [YSQL-Support `GIN` indexes](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/ysql-gin-indexes.md) | \u2705 *DONE* | v2.11 | [Docs](https://docs.yugabyte.com/preview/explore/ysql-language-features/gin/) | Support for generalized inverted indexes for container data types like jsonb, tsvector, and array |\n| [YSQL-Collation Support](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/ysql-collation-support.md) | \u2705 *DONE* | v2.11 |[Docs](https://docs.yugabyte.com/preview/explore/ysql-language-features/collations/) |Allows specifying the sort order and character classification behavior of data per column, or even per operation, according to language- and country-specific rules |\n| [YSQL-Savepoint Support](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/savepoints.md) | \u2705 *DONE* | v2.11 |[Docs](https://docs.yugabyte.com/preview/explore/ysql-language-features/savepoints/) | Useful for implementing complex error recovery in multi-statement transactions|\n| [xCluster replication management through Platform](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/platform-xcluster-replication-management.md) | \u2705 *DONE* | v2.11 | [Docs](https://docs.yugabyte.com/preview/yugabyte-platform/create-deployments/async-replication-platform/) | \n| [Spring Data YugabyteDB 
module](https://github.com/yugabyte/yugabyte-db/blob/master/architecture/design/spring-data-yugabytedb.md) | \u2705 *DONE* | v2.9 | [Track](https://github.com/yugabyte/yugabyte-db/issues/7956) | Bridges the gap for learning distributed SQL concepts with the familiarity and ease of Spring Data APIs |\n| Support Liquibase, Flyway, ORM schema migrations | \u2705 *DONE* | v2.9 | [Docs](https://blog.yugabyte.com/schema-versioning-in-yugabytedb-using-flyway/) | \n| [Support `ALTER TABLE` add primary key](https://github.com/yugabyte/yugabyte-db/issues/1124) | \u2705 *DONE* | v2.9 | [Track](https://github.com/yugabyte/yugabyte-db/issues/1124) | |\n| [YCQL-LDAP Support](https://github.com/yugabyte/yugabyte-db/issues/4421) | \u2705 *DONE* | v2.8 |[Docs](https://docs.yugabyte.com/preview/secure/authentication/ldap-authentication-ycql/#root) | Support for LDAP authentication in the YCQL API | \n| [Platform Alerting and Notification](https://blog.yugabyte.com/yugabytedb-2-8-alerts-and-notifications/) | \u2705 *DONE* | v2.8 | [Docs](https://docs.yugabyte.com/preview/yugabyte-platform/alerts-monitoring/alert/) | To get notified in real time about database alerts, user-defined alert policies notify you when a performance metric rises above or falls below a threshold you set.| \n| [Platform API](https://blog.yugabyte.com/yugabytedb-2-8-api-automated-operations/) | \u2705 *DONE* | v2.8 | [Docs](https://api-docs.yugabyte.com/docs/yugabyte-platform/ZG9jOjIwMDY0MTA4-platform-api-overview) | Securely Deploy YugabyteDB Clusters Using Infrastructure-as-Code| \n\n# Architecture\n\n[YugabyteDB architecture diagram]\n\nReview the detailed architecture in our [Docs](https://docs.yugabyte.com/preview/architecture/).\n\n# Need Help?\n\n* You can ask questions, find answers, and help others on our Community [Slack](https://communityinviter.com/apps/yugabyte-db/register), [Forum](https://forum.yugabyte.com), [Stack Overflow](https://stackoverflow.com/questions/tagged/yugabyte-db), as well as Twitter [@Yugabyte](https://twitter.com/yugabyte).\n\n* Please use [GitHub issues](https://github.com/yugabyte/yugabyte-db/issues) to report issues or request new features.\n\n* To troubleshoot YugabyteDB cluster/node-level issues, refer to the [Troubleshooting documentation](https://docs.yugabyte.com/preview/troubleshoot/).\n\n# Contribute\n\nAs an open-source project with a strong focus on the user community, we welcome contributions as GitHub pull requests. See our [Contributor Guides](https://docs.yugabyte.com/preview/contribute/) to get going. Discussions and RFCs for features happen on the design discussions section of our [Forum](https://forum.yugabyte.com).\n\n# License\n\nSource code in this repository is variously licensed under the Apache License 2.0 and the Polyform Free Trial License 1.0.0. 
A copy of each license can be found in the [licenses](licenses) directory.\n\nThe build produces two sets of binaries:\n\n* The entire database with all its features (including the enterprise ones) is licensed under the Apache License 2.0.\n* The binaries that contain `-managed` in the artifact name and help run a managed service are licensed under the Polyform Free Trial License 1.0.0.\n\n> By default, the build options generate only the Apache License 2.0 binaries.\n\n# Read More\n\n* To see our updates, go to [The Distributed SQL Blog](https://blog.yugabyte.com/).\n* For an in-depth design and the YugabyteDB architecture, see our [design specs](https://github.com/yugabyte/yugabyte-db/tree/master/architecture/design).\n* Tech Talks and [Videos](https://www.youtube.com/c/YugaByte).\n* See how YugabyteDB [compares with other databases](https://docs.yugabyte.com/preview/faq/comparisons/).\n", - "source_links": [], - "id": 139 - }, - { - "page_link": "https://docs.plural.sh/adding-new-application/getting-started-with-runbooks", - "title": " Getting Started With Runbooks", - "text": "# Getting Started With Runbooks\n\nWhat are Plural runbooks? How do I use and create them?\n\n## Articles in this section:\n\n[XML Runbooks](/adding-new-application/getting-started-with-runbooks/runbook-xml)\n\n[YAML Runbooks](/adding-new-application/getting-started-with-runbooks/runbook-yaml)\n\nPlural Runbooks are meant to be installed alongside your open-source applications and serve as interactive tutorials for how to perform common maintenance tasks.\n\nPlural comes with a library of runbooks for each application; you are also free to create your own.\n\nYou can create a runbook just for your own use in your Plural installation, or you can choose to publish the runbook and make it available for other Plural users.\n\nYou can access the runbooks through the Plural admin console; i.e., you must first [install the Plural admin console](/getting-started/admin-console) in order to use the runbooks.", - "source_links": [], - "id": 140 - }, - { - "page_link": "https://docs.plural.sh/adding-new-application/getting-started-with-runbooks/runbook-xml", - "title": " XML Runbooks", - "text": "# XML Runbooks\n\nCreating a Plural runbook from XML.\n\n#### XML Tag Definitions\n\nPlural runbooks are written in XML. XML doesn\u2019t have a predefined markup language, like HTML does. 
Instead, XML allows users to create their own markup symbols to describe content, making an unlimited and self-defining symbol set.\n\nWe have defined the following XML components in an Elixir file; they may be used in the creation of your own runbooks and help standardize their layout.\n\n```\ndefmodule Console.Runbooks.Display do\n use Console.Runbooks.Display.Base\n alias Console.Runbooks.Display.{Xml}\n\n schema do\n component \"box\" do\n attributes ~w(direction width height pad margin align justify gap fill color border borderSide borderSize)\n parents ~w(root box)\n end\n\n component \"text\" do\n attributes ~w(size weight value color)\n parents ~w(box text root link)\n end\n\n component \"markdown\" do\n attributes ~w(size weight value)\n parents ~w(box text root)\n end\n\n component \"button\" do\n attributes ~w(primary label href target action headline)\n parents ~w(box)\n end\n\n component \"input\" do\n attributes ~w(placeholder name label datatype)\n parents ~w(box)\n end\n\n component \"timeseries\" do\n attributes ~w(label datasource)\n parents ~w(box)\n end\n\n component \"valueFrom\" do\n attributes ~w(datasource path doc)\n parents ~w(input text)\n end\n\n component \"image\" do\n attributes ~w(width height url)\n parents ~w(box link)\n end\n\n component \"video\" do\n attributes ~w(width height url autoPlay loop)\n parents ~w(box link)\n end\n\n component \"link\" do\n attributes ~w(href target value color weight)\n parents ~w(text box)\n end\n\n component \"table\" do\n attributes ~w(name width height datasource path)\n parents ~w(box)\n end\n\n component \"tableColumn\" do\n attributes ~w(path header width)\n parents ~w(table)\n end\n end\n\n def parse_doc(xml) do\n with {:ok, parsed} <- Xml.from_xml(xml) do\n case validate(parsed) do\n :pass -> {:ok, parsed}\n {:fail, error} -> {:error, error}\n end\n end\n end\nend\n```\n\nMost of these components, like `box` and `input`, are essentially grommet React components. However, we would like to call out a few custom components that interact with other data from the runbook. They each refer to a datasource and, in some cases, a way to access a value at that datasource:\n\n* `timeseries` references a `datasource`\n* `valueFrom` references a `datasource`, plus a `doc` and `path` that select a value from it\n\nHere is an example Runbook XML template composed of these components.\n\n```\n\n \n