diff --git a/site/app/views/default/index.html b/site/app/views/default/index.html
index 402445d3..d09cd585 100644
--- a/site/app/views/default/index.html
+++ b/site/app/views/default/index.html
@@ -121,10 +121,21 @@
With this collection of guided, hands-on tutorials you will learn how to use Keptn - from installation to continuous delivery with quality gates and automated operations.
-
- We suggest to start with a full tour to learn about all aspects of Keptn. But feel free to explore all other tutorials as well.
- The default overview shows tutorials for the latest Keptn version - but we provide tutorials for older versions as well that can be selected via the version dropdown.
+ The default overview shows tutorials for the most recent Keptn version that has tutorials available.
+ Tutorials for older versions are also available and can be selected via the version dropdown.
+
+
+
+ Learning path
+
+ If you're just starting with Keptn, we suggest the following learning path.
+
+ 1) Start with a quickstart for Keptn and Prometheus that allows you to explore Keptn in a few minutes.
+
+ 2) Take a full tour to learn about all aspects of Keptn; you can choose either a full tour for Prometheus or one for the Dynatrace platform.
+
+ 3) Explore other technology-specific tutorials.
{%- else -%}
diff --git a/site/app/views/default/view.json b/site/app/views/default/view.json
index 118e64b5..2dcbe196 100644
--- a/site/app/views/default/view.json
+++ b/site/app/views/default/view.json
@@ -1,7 +1,7 @@
{
"title": "Keptn Tutorials",
"description": "We provide a collection of guided, hands-on tutorials to help you get started using Keptn",
- "tags": ["keptn09x"],
+ "tags": ["keptn011x"],
"categories": [],
"exclude": [
"^lang-.*"
@@ -9,5 +9,11 @@
"logoUrl": "/images/logo.png",
"toolbarBgColor": "#006bb8",
"sort": "mainCategory",
- "pins": ["keptn-full-tour-prometheus","keptn-full-tour-dynatrace","keptn-full-tour-prometheus-07","keptn-full-tour-dynatrace-07","keptn-full-tour-prometheus-08","keptn-full-tour-dynatrace-08","keptn-full-tour-prometheus-09","keptn-full-tour-dynatrace-09"]
+ "pins": [
+ "keptn-public-demo","keptn-quickstart","keptn-full-tour-prometheus","keptn-full-tour-dynatrace",
+ "keptn-full-tour-prometheus-07","keptn-full-tour-dynatrace-07",
+ "keptn-public-demo-08","keptn-full-tour-prometheus-08","keptn-full-tour-dynatrace-08",
+ "keptn-public-demo-09","keptn-full-tour-prometheus-09","keptn-full-tour-dynatrace-09",
+ "keptn-public-demo-010","keptn-full-tour-prometheus-010","keptn-full-tour-dynatrace-010",
+ "keptn-public-demo-011","keptn-quickstart-011","keptn-full-tour-prometheus-011","keptn-full-tour-dynatrace-011"]
}
diff --git a/site/app/views/keptn011x/keptn-large.png b/site/app/views/keptn011x/keptn-large.png
new file mode 100644
index 00000000..1722be2d
Binary files /dev/null and b/site/app/views/keptn011x/keptn-large.png differ
diff --git a/site/app/views/keptn011x/logo_color.png b/site/app/views/keptn011x/logo_color.png
new file mode 100644
index 00000000..2b89a6b0
Binary files /dev/null and b/site/app/views/keptn011x/logo_color.png differ
diff --git a/site/app/views/keptn011x/view.json b/site/app/views/keptn011x/view.json
new file mode 100644
index 00000000..680905a1
--- /dev/null
+++ b/site/app/views/keptn011x/view.json
@@ -0,0 +1,11 @@
+{
+ "title": "Keptn 0.11.x",
+ "description": "A collection of tutorials for Keptn version 0.11.x",
+ "logoUrl": "/keptn011x/keptn-large.png",
+ "tags": ["keptn011x"],
+ "toolbarBgColor": "#006bb8",
+ "exclude": [
+ ".*-about$",
+ "^lang-.*"
+ ]
+}
diff --git a/site/tutorials/keptn-argo-rollouts-dynatrace-010-on-k3s.md b/site/tutorials/keptn-argo-rollouts-dynatrace-010-on-k3s.md
index 43ab2e66..c7bf403a 100644
--- a/site/tutorials/keptn-argo-rollouts-dynatrace-010-on-k3s.md
+++ b/site/tutorials/keptn-argo-rollouts-dynatrace-010-on-k3s.md
@@ -1,5 +1,5 @@
summary: 5 minute installation. All running on K3s with a single line installation. No Kubernetes cluster needed.
-id: keptn-argo-rollouts-dynatrace-09-on-k3s
+id: keptn-argo-rollouts-dynatrace-10-on-k3s
categories: Dynatrace,k3s,argo,canary
tags: keptn010x,introduction
status: Published
diff --git a/site/tutorials/keptn-argo-rollouts-dynatrace-011-on-k3s.md b/site/tutorials/keptn-argo-rollouts-dynatrace-011-on-k3s.md
new file mode 100644
index 00000000..e1bb9288
--- /dev/null
+++ b/site/tutorials/keptn-argo-rollouts-dynatrace-011-on-k3s.md
@@ -0,0 +1,422 @@
+summary: 5 minute installation. All running on K3s with a single line installation. No Kubernetes cluster needed.
+id: keptn-argo-rollouts-dynatrace-11-on-k3s
+categories: Dynatrace,k3s,argo,canary
+tags: keptn011x,introduction
+status: Published
+authors: Andreas Grabner
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+# Keptn Multi Stage Delivery with Argo Rollouts and Dynatrace on k3s
+
+## Welcome
+Duration: 2:00
+
+Watch the intro in this YouTube video to see what this tutorial includes:
+
+
+**Canary deployments** are a very popular progressive delivery model where new versions of a software component are safely released in a "phased" approach (e.g: 25%, 50%, 75%, 100% of traffic) instead of a big bang (100% of traffic). Between each phase an evaluation takes place deciding on whether to continue with the next phase of rolling out the canary to more end users or whether to roll it back to the previous version.
+
+[Argo Rollouts](https://argoproj.github.io/argo-rollouts/) is a very popular open source progressive delivery controller for Kubernetes (k8s) and provides both Blue/Green as well as Canary rollout models. If you want to learn more check out their [introduction video of Argo Rollouts](https://www.youtube.com/watch?v=hIL0E2gLkf8)
+
+### Why Keptn with Argo Rollouts?
+
+Argo Rollouts is clearly a great and proven choice when it comes to managing Blue/Green or Canary rollouts.
+
+
+
+And while Argo Rollouts includes an analysis component to control the rollout itself by analyzing metrics from external tools (Prometheus, Kayenta, Wavefront ...), it lacks other capabilities that require integrating Argo Rollouts with other tools to cover:
+* Pre-validate environment
+* Automated observability configuration
+* Analysis based on more complex SLOs
+* Analysis against previous timeframes
+* Analysis result based promotion type (manual vs automated)
+* Visualization of canary analysis
+* Automated testing as part of rollout
+* Status notifications of rollout progress
+* Integrate approval process into ChatOps
+
+### The best of both worlds: Keptn managing Argo Rollouts
+
+
+
+Keptn's event-driven and open standard approach to delivery automation allows us to take the best of both worlds:
+* Argo Rollouts: Blue / Green & Canary
+* Keptn: Advanced SLO-based Quality Gates between Rollout Steps
+* Keptn: SLO-based approval strategies (manual & automated)
+* Keptn: Visualization of Rollout Status & Quality Gates
+* Keptn: Automate Testing of each canary
+* Keptn: Integrate into your ChatOps tools
+* Keptn: Multi-Stage Delivery Support
+
+So - lets get started with our tutorial!
+
+
+## Prepare Installation of Keptn on k3s
+Duration: 5:00
+
+We have an extensive [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s) tutorial on GitHub that includes setting up k3s, installing Keptn and automatically creating several Keptn projects to demo different use case with Dynatrace.
+In this tutorial we focus on the Delivery Use Case using Argo Rollouts with Dynatrace as Observability Platform. If you want to explore more check out other tutorials on https://tutorials.keptn.sh or check out the full tutorial details on [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s)
+
+### Prerequisites
+
+**Linux Machine**
+As we are going to deploy an app in multiple stages as well as in multiple replicas we need a machine that can not only run keptn but can be used to run our app as well.
+You should therefore have a Linux machine with at least 8vCPUs and 32GB of RAM, e.g: EC2 t3.2large. As for storage - please have at least 30GB of storage
+As we will be accessing Keptn via HTTP and HTTPS make sure that both ports (80 & 443) are allowed by your firewall.
+What we will need is SSH access to this machine and SUDO rights so we can execute our scripts
+
+So - here is our checklist:
+- [x] Large enough Linux Machine
+- [x] Ports 80 & 443 are open
+- [x] SSH access and SUDO rights
+
+**Dynatrace Environment**
+Next thing we need is a Dynatrace environment. If you don't have one - just sign up for a [Dynatrace SaaS Trial](https://dynatrace.com/trial?utm_campaign=keptn). As this tutorial includes automated rollout decisions based on SLOs (Service Level Objectives), we use Keptn to pull this data from Dynatrace, as Dynatrace will automatically monitor our k3s cluster including every app we deploy on it.
+
+*1: API Token: DT_API_TOKEN*
+For our Keptn installation we need a Dynatrace API Token that Keptn can use to query data as well as push some dashboards.
+For that - please create an API token (via Settings -> Integration -> Dynatrace API) with the following privileges. Make sure you copy that API Token in a safe spot:
+
+
+
+*2: PaaS Token: DT_PAAS_TOKEN*
+We also need a Dynatrace PaaS Token that allows our install script to install a Dynatrace OneAgent on k3s to also monitor that k3s cluster automatically. For that go to Settings -> Integration -> Platform as a Service and create a new token!
+
+*3: Your Dynatrace Tenant Host: DT_TENANT*
+If you have a Dynatrace SaaS (e.g: trial) environment we need the host name, e.g: abc12345.live.dynatrace.com. If you run Dynatrace Managed then we need the host + environment id, e.g: yourmanageddomain.com/e/YOUR-ENV-ID
+
+*4: Your Dynatrace username: OWNER_EMAIL*
+This should be simple. The username you have when logging in to Dynatrace. That is probably your email. We need that because a dashboard will be created automatically for us and every dashboard needs an owner. This is why we need that email!
+
+So - here is our checklist:
+- [x] DT_API_TOKEN
+- [x] DT_PAAS_TOKEN
+- [x] DT_TENANT
+- [x] OWNER_EMAIL
+
+### Preparing our Linux Host
+
+Now that we have all data we can start with the installation
+
+**Installing additional command line tools**
+The installation script of our tutorial needs a couple of tools as those scripts will download some additional files (via curl), will parse some files (jq, yq), will iterate through file system (tree). We will also need git to download a git repo. Here is a list of all these tools and how you could install them if you have yum. For other Linux distributions please check how to install these tools: git, curl, jq, tree, yq
+
+```console
+sudo yum update -y
+sudo yum install git -y
+sudo yum install curl -y
+sudo yum install jq -y
+sudo yum install tree -y
+sudo wget https://github.com/mikefarah/yq/releases/download/v4.2.0/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
+```
+
+**Download (git clone) the tutorial**
+As mentioned, the [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s) is a broad tutorial. While we are only using parts of it we simply download the whole thing locally which also includes the actual installation script. To do that we clone the 0.7.3 branch of that tutorial:
+
+```console
+git clone --branch 0.7.3 https://github.com/keptn-sandbox/keptn-on-k3s --single-branch
+cd keptn-on-k3s
+```
+
+## Installing Keptn on k3s for Argo Rollouts use cases
+Duration: 5:00
+
+We are almost ready to run our installation script. The only thing left to do is to export some of our data we prepared earlier (token, endpoints ...) via env-variables so the script can easily pick it up. Additionally to the 4 variables we prepared we also `export LE_STAGE=staging` - this will allow us to get a LetsEncrypt staging certificate so we can use TLS encryption.
+
+Now - here is what you should export - obviously with the values that you have collected:
+```console
+$ export DT_TENANT=abc12345.live.dynatrace.com
+$ export DT_API_TOKEN=YOUR_API_TOKEN
+$ export DT_PAAS_TOKEN=YOUR_PAAS_TOKEN
+$ export OWNER_EMAIL=yourdynatraceuser@yourmail.com
+$ export LE_STAGE=staging
+```
+
+The installation script has multiple options - here is the one that will install the full tutorial which installs Keptn's Delivery Plane and a handful of sample projects including the `demo-rollout` project which showcases the Argo Rollout use case!
+
+**BE AWARE** there is an option called --provider. If you run on e.g: EC2 then specify aws. If your machine is hosted on GCP then specify gcp. If you just run on a local machine or a VM you can omit that parameter!
+
+```console
+./install-keptn-on-k3s.sh --deliveryplane --provider aws --with-dynatrace --with-demo dynatrace --letsencrypt --with-gitea --use-nip
+```
+
+**Use your own Domain Name (SUGGESTED)**
+By default the installation will use your local IP and a free DNS Resolution service from nip.io to use proper DNS names. That works well but we have learned that it might sometimes be a bit unstable. You can create your own DNS, eg.: using Route53 to point to your public IP and then pass this domain name via the parameter --fqdn. Here would be the installation option to install the quality gates with a custom domain!
+```console
+./install-keptn-on-k3s.sh --deliveryplane --provider aws --with-dynatrace --with-demo dynatrace --letsencrypt --with-gitea --fqdn yourdomain.abc
+```
+I TRULY ENCOURAGE you to get your own DNS as it is simply more stable!
+
+
+---
+
+At the end of the installation the script outputs information about the installation and about each demo that was installed. Important for our tutorial is the output that looks like this as it contains all information on how we can access Keptn:
+```
+#######################################>
+# Keptn Deployment Summary
+#######################################>
+API URL : https://keptn.YOUR.IP.nip.io/api
+Bridge URL: https://keptn.YOUR.IP.nip.io/bridge
+Bridge Username: keptn
+Bridge Password: YOURBRIDGEPASSWORD
+API Token : YOURKEPTNAPITOKEN
+Git Server: http://git.YOUR.IP.nip.io
+Git User: keptn
+Git Password: keptn#R0cks
+```
+
+Additionally take note of the Dynatrace Demo Summary block. There is a section covering the Argo Rollout Demo
+```
+#######################################>
+# Dynatrace Demo Summary
+#######################################>
+6 Dynatrace Demo projects have been created, the Keptn CLI has been downloaded and configured and a first demo quality gate was already executed.
+
+....
+------------------------------------------------------------------------
+For the Canary Delivery Use Case using Argo Rollouts we have created project demo-rollout that deploys a simplenode app in 2 stages (blue/green in staging and canary in prod)
+To trigger a delivery simply do this
+1: Trigger a delivery through the Keptn CLI or the Keptn API as explained in the readme
+ keptn trigger delivery --project=demo-rollout --stage=staging --service=simplenode --image=docker.io/grabnerandi/simplenodeservice --tag=1.0.0
+2: Watch the delivery progress in Keptn's bridge
+ Project URL: https://keptn.YOUR.IP.nip.io/bridge/project/demo-rollout
+ User / PWD: keptn / YOURBRIDGEPASSWORD
+3: To deliver the next version simply run
+ keptn trigger delivery --project=demo-rollout --stage=staging --service=simplenode --image=docker.io/grabnerandi/simplenodeservice --tag=2.0.0
+
+```
+
+### Access k3s
+You can access k3s through the k3s cli that offers you full access to all kubectl commands. Like this
+```
+k3s kubectl get pods -A
+```
+
+If you want to use kubectl directly you can export the kubeconfig and then run the same command using kubectl like this:
+```
+export KUBECONFIG="/etc/rancher/k3s/k3s.yaml"
+kubectl get pods -A
+```
+
+### Access Keptn's Bridge
+To validate the installation went fine lets open Keptn's bridge by following the link to the Dynatrace project that was created for us. The url ends with /bridge/project/demo-rollout!
+
+
+
+We see that this Keptn project uses a 2-stage shipyard file. In staging we will later see a Blue/Green Argo Rollout and in Prod we will see a Canary Argo Rollout.
+
+You may notice that the URL ends with nip.io. We are using this free DNS service to leverage DNS names which also allows us to do some traffic routing on different domain names even though everything in the end resolves to your local IP. You will also notice that your browser tells you that the website is not secure even though you are accessing an https endpoint. This is because we created a temporary staging certificate using LetsEncrypt. If you want to use Keptn for production use cases you would need to create your own certificates. For our tutorial its OK though - you can just tell your browser to continue.
+When you are prompted for username and password simply use bridge username & password that you find in the installation script output.
+
+Now we are ready to do some actual deployments
+
+### Access your Git Repos
+Every keptn project internally holds a git repository containing all relevant files such as
+1. Shipyard.yaml: defines all automation sequences
+2. Helm charts: containing our Argo Rollout definitions
+3. SLIs & SLOs: define which metrics to analyze during a rollout sequence
+4. Test Files: define test cases, e.g: JMeter tests
+5. Any other random files, e.g: we will find some helper html & json files for our tutorial
+
+The tutorial comes with Gitea as a Git Web Service and every created Keptn project is linked to an upstream git repository in Gitea.
+The easiest way to access Gitea is therefore through Keptn's project overview page and then follow the link to the upstream git as shown here:
+
+
+There is one helper file in the root directory of the main branch which is called `viewsimplenodeapp.html`. Please download that html file content locally and open that HTML in a browser. It will show us the actual deployed status of our simplenode app in staging and production. Initially it will not look like much because we haven't deployed anything yet. But - this is what you should see!
+
+
+## Deploy version #1
+Duration: 5:00
+
+Now lets deploy version 1 of our simplenode app. Let's start by using the Keptn CLI.
+We simply copy/paste the command that was given to us by the installation script output:
+```
+keptn trigger delivery --project=demo-rollout --stage=staging --service=simplenode --image=docker.io/grabnerandi/simplenodeservice --tag=1.0.0
+```
+
+Once this is triggered we can watch the progress in the Keptn's Bridge. The best is to follow the event-driven sequences in the sequences screen. If everything goes according to plan the following steps happen:
+1. Deployment will trigger a helm deployment including the Argo Rollout Blue/Green definition
+2. Release will promote the Argo Rollout to become the active deployment
+3. Tests will trigger a short JMeter test to test the application
+4. Evaluation will get-sli's from Dynatrace and then calculate the SLO Score
+5. Approval: If everything is fine we are asked to approve this build into production
+
+
+
+If you keep an eye on your browser that has the local html file open we should see the first version deployed in staging (left frame)!
+
+
+### Next steps for version 1?
+
+From this view we have several options on next steps:
+
+
+1. You can open the URL of the deployed app
+2. You can explore the SLO analysis
+3. You can approve version 1 into production
+
+Please walk through all of those - especially the approval
+
+Once we approve, Keptn will continue the sequences defined in the shipyard for the prod stage.
+Deploying version 1 in prod looks almost the same as in staging. This is what you should see until you reach the approval:
+1. Deployment will trigger a helm deployment including the Argo Rollout Canary definition
+2. Tests will be triggered to validate that production is still healthy
+3. Evaluation will get-sli's from Dynatrace
+4. We could now approve to do the Canary rollout!
+
+Now - Version 1 is very special because it is the first deployment and Argo Rollout by default simply rolls out a canary to 100% if it is the first deployment.
+We could therefore continue the process by selecting YES in the approval step. But - we wont see a whole lot as the canary is already at 100%.
+
+Therefore - lets just validate that you see exactly these steps, open the deployed link and evaluate the SLO results to make sure everything works as expected!
+
+
+We can also see that the our local html file has picked up the production deployment by now (right frame)!
+
+
+Now - lets do the same for version 2 to observe blue/green and canary deployments
+
+## Deploy version #2
+Duration: 5:00
+
+Now that version 1 of our simplenode app is running in staging and production its time to truly see the value of blue/green and canary!
+Lets trigger the delivery through the Keptn CLI as we have done it for build 1. Here is the command:
+```
+keptn trigger delivery --project=demo-rollout --stage=staging --service=simplenode --image=docker.io/grabnerandi/simplenodeservice --tag=2.0.0
+```
+
+This will kick off the delivery of version 2. You will see that it will fail in staging already as version 2 shows a high failure rate which results in a failed SLO evaluation.
+
+
+
+Lets try version 3 and see if we have more luck!
+
+## Deploy version #3
+Duration: 5:00
+
+Similar to version 2 we execute the following Keptn CLI command to trigger the delivery
+```
+keptn trigger delivery --project=demo-rollout --stage=staging --service=simplenode --image=docker.io/grabnerandi/simplenodeservice --tag=3.0.0
+```
+
+Version 3 is much better and should make it through the SLO Evaluation in staging. In order to promote it into production we simply approve it in the final step!
+Now we sit and wait until Version 3 is fully rolled out. While this is happening you can keep watching our special local HTML file to see how gradually the canary gets rolled out to all incoming traffic!
+
+
+
+Because build 3 should have no issues we should see it all the way going through alright!
+
+## Deploy version 4 using API
+Duration: 5:00
+
+Now to our final version - version 4.
+You can trigger it through the Keptn CLI if you want - but - to learn something new lets do it through the Keptn API.
+For that you can get the API Token from the top right menu where you can also get the link to the Swagger UI.
+In the Swagger UI use the API Token for the authorization of the POST Event API Endpoint. Then click on "Try Out". This then allows you to define the API Body which we use to define the Keptn Cloud Event to trigger the delivery sequence in the staging stage for build 4.
+
+Here is a rough overview of how this workflow looks like - below you find the details on the body to post into the edit field:
+
+
+The Keptn CLI we used for our first three deployments basically sends a Keptn Cloud Event to the Keptn API endpoint. And that's exactly what we are doing here. We are triggering a sequence in a particular project, stage and for a particular service and we pass some additional meta data such as the new image we want to deploy or also some labels (which we can also pass through the CLI but haven't done yet).
+
+To find the body please navigate to your Gitea UI for the demo-rollout project. In the main branch you will find a file called `staging.deployment.triggered.json`. This file contains the event that we want to send - already customized for your installation! It should look something like this:
+
+```json
+{
+ "type": "sh.keptn.event.staging.delivery.triggered",
+ "shkeptnspecversion": "0.2.0",
+ "specversion": "1.0",
+ "source": "triggered-via-api",
+ "contenttype": "application/json",
+ "data": {
+ "project": "demo-rollout",
+ "stage": "staging",
+ "service": "simplenode",
+ "labels": {
+ "deployWith": "ArgoRollout",
+      "buildId": "4.0.0",
+      "owner": "Andi"
+    },
+    "configurationChange": {
+      "values": {
+        "image": "docker.io/grabnerandi/simplenodeservice:4.0.0"
+ }
+ }
+ }
+}
+```
+
+Copy/Paste it from your gitea or even from here as it should be the same. Feel free to change the owner from Andi to some other name.
+Then click "Execute".
+As response of the API call we receive the Keptn Context. This unique ID can be used for other API calls to e.g: query the status of the sequence. This makes it easy to integrate Keptn with other tools, e.g.: Trigger it from Jenkins, GitLab, Azure DevOps ...
+
+What we should see is that Build 4 makes it into production but it is rejected and rolled back right after the initial deployment of the first canary step.
+
+
+The reason this happens is because build 4 results in a higher error rate ONLY when run in production and is therefore a build we don't want to keep in production.
+
+
+## Argo Rollouts with Keptn behind the scenes
+Duration: 2:00
+
+Argo Rollouts provide both Blue/Green and Canary deployments. For Canary you can define individual promotion steps.
+Argo Rollouts provides its own evaluation mechanism to decide whether to roll forward or not. In our scenario we want Keptn to have full control over the canary rollout steps which is why we use the `pause` step option to hand control to Keptn.
+
+Keptn's Argo-Service is the service that listens to Keptn's `release.started` event and will use this to promote the current canary to the next step.
+In case of a problem the Argo-Service listens to keptn's `rollback.started` event and will use it to abort the canary.
+
+For more details please have a look at the Helm Chart that you can find in the staging and prod namespace of your Keptn's Projects git repo.
+Also have a detailed look at the shipyard.yaml of your project. As of Keptn 0.9.x we have to use individual sequences to model the steps for a canary rollout. That means that you have to have a keptn sequence for each rollout pause step in your rollout definition. The plan is that this will be made easier in future Keptn version!
+
+
+## Troubleshooting
+Duration: 0:00
+
+Here some troubleshooting tips
+
+### Lost your Keptn's bridge username / password
+
+You can access this via the Keptn CLI. Just execute this:
+
+```
+echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+```
+
+### Installation of tutorial failed
+
+Please contact us through the [Keptn slack workspace](https://slack.keptn.sh). We have a channel called #keptn-docs where you can directly ping us about issues on the tutorials
+
+
+## Uninstall
+Duration: 0:00
+
+If you are done and want to uninstall the tutorial you can simply execute the following command
+```console
+k3s-uninstall.sh
+```
+
+This will delete the k3s cluster including keptn.
+If you also want to delete the git repository content you can also delete the local directory keptn-on-k3s
+
+## Other tutorials
+
+If you want to explore other tutorials that are possible with this keptn on k3s demo then have a look at more tutorials on https://tutorials.keptn.sh (we are still developing some of them) - or explore the content on [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s)
+
+## Finish
+Duration: 0:00
+
+In this tutorial, you have learned how to use Keptn to automate multi-stage delivery using Argo Rollouts for Blue/Green and Canary.
+The benefit that Keptn brings in is that it automates SLO validation, provides a promotion assistant, automates testing, integrates with your ChatOps tools, configures your monitoring and provides the automation across multi stages.
+
+While this tutorial has used tools such as JMeter and Dynatrace you can easily replace it without any automation sequence changes with tools such as Locust or Prometheus.
+
+
+### What we've covered
+
+- Install Keptn and the sample project for Argo Rollouts
+- Trigger delivery sequences through the Keptn CLI and API
+- See SLO-based Evaluation in action
+- How Argo Rollouts can be integrated with Keptn in terms of delivery, promotion and abort
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-full-tour-dynatrace-011.md b/site/tutorials/keptn-full-tour-dynatrace-011.md
new file mode 100644
index 00000000..18eccdc9
--- /dev/null
+++ b/site/tutorials/keptn-full-tour-dynatrace-011.md
@@ -0,0 +1,197 @@
+summary: Full Keptn installation on a Kubernetes cluster (GKE recommended)
+id: keptn-full-tour-dynatrace-011
+categories: Dynatrace,aks,eks,gke,openshift,pks,minikube,full-tour,quality-gates,automated-operations
+tags: keptn011x,advanced
+status: Published
+authors: Florian Bacher
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+# Keptn Full Tour on Dynatrace
+
+## Welcome
+Duration: 2:00
+
+
+In this tutorial you'll get a full tour through Keptn. Before we get started you'll get to know what you will learn while you walk yourself through this tutorial.
+
+### What you'll learn
+- How to create a sample project
+- How to onboard a first microservice
+- How to deploy your first microservice with blue/green deployments
+- How to set up quality gates
+- How to prevent bad builds of your microservice from reaching production
+- How to trigger the changes of feature toggles in response to issues detected in a production system
+
+
+You'll find a time estimate for completing this tutorial in the right top corner of your screen - this should give you guidance how much time is needed for each step.
+
+In this tutorial, we are going to install Keptn on a Kubernetes cluster, along with Istio for traffic routing and ingress control.
+
+- [Keptn](https://keptn.sh) as a control-plane for continuous delivery and automated operations
+- [Istio](https://istio.io) as the ingress and service mesh within the cluster for traffic routing between blue/green versions of our services
+- [Unleash](https://unleash.github.io/) as a feature toggle framework that is connected to Keptn to toggle features based on monitoring data
+
+
+The full setup that we are going to deploy is sketched in the following image.
+
+
+
+{{ snippets/010/install/cluster.md }}
+
+{{ snippets/010/install/istio.md }}
+
+{{ snippets/010/install/download-keptnCLI.md }}
+
+{{ snippets/010/install/install-full.md }}
+
+{{ snippets/010/install/configureIstio.md }}
+
+{{ snippets/010/install/authCLI-istio.md }}
+
+{{ snippets/010/monitoring/setupDynatrace.md }}
+
+{{ snippets/010/manage/createProject.md }}
+
+{{ snippets/010/manage/onboardService.md }}
+
+{{ snippets/010/monitoring/configureDynatraceSlis.md }}
+
+{{ snippets/010/quality-gates/setupQualityGate.md }}
+
+{{ snippets/010/self-healing/featureFlagsDynatrace.md }}
+
+
+
+## Finish
+Duration: 1:00
+
+### Congratulations
+
+Thanks for taking a full tour through Keptn!
+Although Keptn has even more to offer, this should have given you a good overview of what you can do with Keptn.
+
+### What we've covered
+
+
+- We have created a sample project with the Keptn CLI and set up a multi-stage delivery pipeline with the `shipyard` file.
+ ```
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-sockshop"
+spec:
+ stages:
+ - name: "dev"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "test"
+ properties:
+ teststrategy: "functional"
+ - name: "evaluation"
+ - name: "release"
+ - name: "delivery-direct"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "staging"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "dev.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+ - name: "release"
+ - name: "rollback"
+ triggeredOn:
+ - event: "staging.delivery.finished"
+ selector:
+ match:
+ result: "fail"
+ tasks:
+ - name: "rollback"
+ - name: "delivery-direct"
+ triggeredOn:
+ - event: "dev.delivery-direct.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "production"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "staging.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "release"
+ - name: "rollback"
+ triggeredOn:
+ - event: "production.delivery.finished"
+ selector:
+ match:
+ result: "fail"
+ tasks:
+ - name: "rollback"
+ - name: "delivery-direct"
+ triggeredOn:
+ - event: "staging.delivery-direct.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+ ```
+
+- We have set up quality gates based on service level objectives in our `slo` file.
+ ```
+ ---
+ spec_version: "0.1.1"
+ comparison:
+ aggregate_function: "avg"
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ number_of_comparison_results: 1
+ filter:
+ objectives:
+ - sli: "response_time_p95"
+ key_sli: false
+ pass: # pass if (relative change <= 10% AND absolute value is < 600ms)
+ - criteria:
+ - "<=+10%" # relative values require a prefixed sign (plus or minus)
+ - "<600" # absolute values only require a logical operator
+ warning: # if the response time is below 800ms, the result should be a warning
+ - criteria:
+ - "<=800"
+ weight: 1
+ total_score:
+ pass: "90%"
+ warning: "75%"
+ ```
+
+- We have tested our quality gates by deploying a bad build to our cluster and verified that Keptn quality gates stopped them.
+ 
+
+- We have set up self-healing by automated toggling of feature flags in Unleash.
+ 
+
+{{ snippets/010/integrations/gettingStarted.md }}
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-full-tour-dynatrace-011/codelab.json b/site/tutorials/keptn-full-tour-dynatrace-011/codelab.json
new file mode 100644
index 00000000..b5e89e7a
--- /dev/null
+++ b/site/tutorials/keptn-full-tour-dynatrace-011/codelab.json
@@ -0,0 +1,35 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:48+01:00",
+ "id": "keptn-full-tour-dynatrace-011",
+ "duration": 104,
+ "title": "Keptn Full Tour on Dynatrace",
+ "authors": "Florian Bacher",
+ "summary": "Full Keptn installation on a Kubernetes cluster (GKE recommended)",
+ "source": "keptn-full-tour-dynatrace-011_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "dynatrace",
+ "aks",
+ "eks",
+ "gke",
+ "openshift",
+ "pks",
+ "minikube",
+ "full-tour",
+ "quality-gates",
+ "automated-operations"
+ ],
+ "tags": [
+ "advanced",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-full-tour-dynatrace-011"
+}
diff --git a/site/tutorials/keptn-full-tour-prometheus-011.md b/site/tutorials/keptn-full-tour-prometheus-011.md
new file mode 100644
index 00000000..6b1a4cc0
--- /dev/null
+++ b/site/tutorials/keptn-full-tour-prometheus-011.md
@@ -0,0 +1,136 @@
+summary: Full Keptn installation on a Kubernetes cluster (GKE recommended)
+id: keptn-full-tour-prometheus-011
+categories: Prometheus,aks,eks,gke,openshift,pks,minikube,full-tour,quality-gates,automated-operations
+tags: keptn011x,advanced
+status: Published
+authors: Florian Bacher
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+# Keptn Full Tour on Prometheus
+
+## Welcome
+Duration: 2:00
+
+In this tutorial you'll get a full tour through Keptn. Before we get started, you'll get an overview of what you will learn as you walk through this tutorial.
+
+### What we will cover
+- How to create a sample project
+- How to onboard a first microservice
+- How to deploy your first microservice with blue/green deployments
+- How to setup quality gates
+- How to prevent bad builds of your microservice from reaching production
+
+- How to integrate other tools like Slack, MS Teams, etc. into your Keptn installation
+
+You'll find a time estimate until the end of this tutorial in the right top corner of your screen - this should give you guidance on how much time is needed for each step.
+
+
+In this tutorial, we are going to install Keptn on a Kubernetes cluster, along with Istio for traffic routing and ingress control.
+
+- [Keptn](https://keptn.sh) as a control-plane for continuous delivery and automated operations
+- [Istio](https://istio.io) as the ingress and service mesh within the cluster for traffic routing between blue/green versions of our services
+
+The full setup that we are going to deploy is sketched in the following image.
+
+
+{{ snippets/010/install/cluster.md }}
+
+{{ snippets/010/install/istio.md }}
+
+{{ snippets/010/install/download-keptnCLI.md }}
+
+{{ snippets/010/install/install-full.md }}
+
+{{ snippets/010/install/configureIstio.md }}
+
+{{ snippets/010/install/authCLI-istio.md }}
+
+{{ snippets/010/manage/createProject.md }}
+
+{{ snippets/010/manage/onboardService.md }}
+
+{{ snippets/010/monitoring/setupPrometheus.md }}
+
+{{ snippets/010/quality-gates/setupQualityGate.md }}
+
+{{ snippets/010/self-healing/upscalePrometheus.md }}
+
+
+## Finish
+Duration: 1:00
+
+Thanks for taking a full tour through Keptn!
+Although Keptn has even more to offer, this tutorial should have given you a good overview of what you can do with Keptn.
+
+### What we've covered
+
+- We have created a sample project with the Keptn CLI and set up a multi-stage delivery pipeline with the `shipyard` file
+
+ ```
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-sockshop"
+spec:
+ stages:
+ - name: "dev"
+ sequences:
+ - name: "artifact-delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "test"
+ properties:
+ teststrategy: "functional"
+ - name: "evaluation"
+ - name: "release"
+ - name: "artifact-delivery-db"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ ...
+ ```
+
+- We have set up quality gates based on service level objectives in our `slo` file
+ ```
+ ---
+ spec_version: "1.0"
+ comparison:
+ aggregate_function: "avg"
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ number_of_comparison_results: 1
+ filter:
+ objectives:
+ - sli: "response_time_p95"
+ key_sli: false
+ pass: # pass if (relative change <= 10% AND absolute value is < 600ms)
+ - criteria:
+ - "<=+10%" # relative values require a prefixed sign (plus or minus)
+ - "<600" # absolute values only require a logical operator
+ warning: # if the response time is below 800ms, the result should be a warning
+ - criteria:
+ - "<=800"
+ weight: 1
+ total_score:
+ pass: "90%"
+ warning: "75%"
+ ```
+
+
+- We have tested our quality gates by deploying a bad build to our cluster and verified that Keptn quality gates stopped it.
+ 
+
+
+
+{{ snippets/010/integrations/gettingStarted.md }}
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-full-tour-prometheus-011/codelab.json b/site/tutorials/keptn-full-tour-prometheus-011/codelab.json
new file mode 100644
index 00000000..3028b3e3
--- /dev/null
+++ b/site/tutorials/keptn-full-tour-prometheus-011/codelab.json
@@ -0,0 +1,35 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:49+01:00",
+ "id": "keptn-full-tour-prometheus-011",
+ "duration": 86,
+ "title": "Keptn Full Tour on Prometheus",
+ "authors": "Florian Bacher",
+ "summary": "Full Keptn installation on a Kubernetes cluster (GKE recommended)",
+ "source": "keptn-full-tour-prometheus-011_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "prometheus",
+ "aks",
+ "eks",
+ "gke",
+ "openshift",
+ "pks",
+ "minikube",
+ "full-tour",
+ "quality-gates",
+ "automated-operations"
+ ],
+ "tags": [
+ "advanced",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-full-tour-prometheus-011"
+}
diff --git a/site/tutorials/keptn-in-a-box-010/codelab.json b/site/tutorials/keptn-in-a-box-010/codelab.json
index 0b5b20f3..365cd6ab 100644
--- a/site/tutorials/keptn-in-a-box-010/codelab.json
+++ b/site/tutorials/keptn-in-a-box-010/codelab.json
@@ -20,7 +20,6 @@
"installation",
"microkubernetes",
"microk8s",
- "full-tour",
"quality-gates",
"performance-as-a-service",
"automated-operations"
diff --git a/site/tutorials/keptn-in-a-box-011.md b/site/tutorials/keptn-in-a-box-011.md
new file mode 100644
index 00000000..306ad0f3
--- /dev/null
+++ b/site/tutorials/keptn-in-a-box-011.md
@@ -0,0 +1,517 @@
+summary: 5 minute installation. All running on MicroK8s with a single line installation. No Kubernetes cluster needed.
+id: keptn-in-a-box-011
+categories: microk8s, dynatrace,installation, microkubernetes, microk8s,full-tour,quality-gates,performance-as-a-service,automated-operations
+tags: keptn011x,introduction
+status: Published
+authors: Sergio Hinojosa
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+# Keptn in a Box
+
+## Welcome Β
+Duration: 2:00
+
+
+
+In this tutorial you'll learn how to run and customize [Keptn-in-a-Box](https://github.com/keptn-sandbox/keptn-in-a-box). Keptn-In-A-Box is a Bash script that will convert a plain Ubuntu machine into a Single Node Kubernetes Cluster with Keptn installed and configured (among other cool features which will set sail for your autonomous cloud journey). The script is programmed in a modular way so you can select the π§© **installationBundle** that best suits your needs.
+
+*Keptn-in-a-Box is a π rocket launcher for enabling tutorials or workshops in an easy, fast and resource efficient way.*
+
+In a matter of minutes you'll have a fully configured **Single Node Kubernetes Cluster** for learning [Keptn tutorials](https://tutorials.keptn.sh), trying out new functionalities, building your own pipeline or even delivering **Performance-as-a-Self-Service**.
+
+[Keptn-in-a-Box](https://github.com/keptn-sandbox/keptn-in-a-box) runs on [microk8s](https://microk8s.io/), which is a simple production-grade upstream certified Kubernetes made for developers and DevOps.
+
+The mantra behind Keptn-In-A-Box is that you
+
+Positive
+: Spend **more** time **innovating** πβοΈ and *less* time *configuring* π£π
+
+
+
+
+*You can actually just run the program without any customization, but let's take the time to understand what Keptn-in-a-Box does for you and how you can customize the installation.*
+
+### What you'll learn
+- What are the requirements needed
+- How to customize Keptn-in-a-Box
+- How to run Keptn-in-a-Box
+- How to access the configured services
+- How to troubleshoot in case of troubles
+
+Positive
+: β° This tutorial is dynamic, meaning the time calculated depends on the customization you provide. The most common customizations are reflected in its own steps. As you go along on this tutorial you'll find `recommended`β¦Ώ and `optional`β steps which you'll be able to skip if not desired.
+
+| | |
+|------------|------------|
+| Recommended| β¦Ώ |
+| Optional | β |
+
+## Get your Ubuntu box
+Duration: 5:00
+
+
+
+### Prerequisite
+The only prerequisite for Keptn-in-a-Box is that you get an Ubuntu machine and that it has an internet connection. This can be a virtual machine running in your datacenter, on your laptop or in a cloud provider such as Microsoft Azure, Amazon Web Services, Google Cloud, among others.
+The tested distributions are **Ubuntu Server 18.04 LTS & 20.04 LTS**
+
+#### βοΈGet a cloud VM
+Don't have a VM or a Cloud Account? Don't worry, here you can sign up for a free tier at:
+ - [Amazon Web Services](https://aws.amazon.com/free/)
+ - [Microsoft Azure](https://azure.microsoft.com/en-us/free/)
+ - [Google Cloud](https://cloud.google.com/free)
+
+#### π»Get a local VM
+Want to try it locally? Not a problem. Check out [multipass](https://multipass.run/), a great way to spin up instant Ubuntu VMs on Windows, Mac or Linux computers.
+
+### πSizing
+For most usecases we recommend `4 Cores`, `16 Gigs of RAM` and `20 Gigs of diskspace`. Our tests on aws have shown that the minimum required for running Keptn-in-a-Box with the default `installation bundle` is a t2.medium (2 vCPU and 4 Gib of RAM) and 10 Gigabytes of disk space. Nevertheless this won't leave much space for spinning other services or onboarding applications.
+
+For the π ultimate experience you could get a `t2.2xlarge` with 8 Cores, 32 Gigs of RAM and 20 Gigs of diskspace.
+
+### AWS sizings for reference
+
+
+Negative
+: Warning, a self-hosted Git Service ([Gitea](https://gitea.io/)) has been added and it makes Keptn-in-a-Box a bit chubby π, [here is the issue](https://github.com/keptn-sandbox/keptn-in-a-box/issues/12). This is for teaching and exposing the GitOps approach Keptn uses. You can disable deploying Gitea with these variables `git_deploy=false` & `git_migrate=false` (in the Advanced Features section it is explained in more detail how to add or remove modules). I highly recommend just adding more resources so all your pods can be deployed successfully (a default installation currently needs an xlarge).
+
+Below is a table for the sizing reference.
+
+| | | | |
+|----|-----------|-----------|------------------|
+| - |**Size** |**vCPUs** | **Memory (GiB)** |
+| π | t3.medium | 2 | 4 |
+| π | t3.large | 2 | 8 |
+| π | t3.xlarge | 4 | 16 |
+| π€ | t3.2xlarge| 8 | 32 |
+
+### βοΈ Open ports
+If you define security rules in the Cloud provider or on your datacenter, your instance only needs to have the following ports accessible:
+- 22 / SSH
+- 80 / HTTP
+- 443 / HTTPS
+
+### Other considerations
+Positive
+: The functions for Keptn-in-a-Box were developed under Ubuntu but this does not mean it won't work for other operating systems such as CentOS, Fedora, openSUSE, etc... If you are willing to try it out, we would love to hear the results. Just bear in mind that the installation of Microk8s depends on [Snapcraft package manager](https://snapcraft.io/). Your chances are quite high since Microk8s is available in 42 Linux flavours.
+
+## Login to your Ubuntu box
+Duration: 1:00
+
+### π» Login to your Ubuntu
+When your Ubuntu machine is up and running, let's log in into it.
+```bash
+ssh yourusername@the-bind-ip-or-dns
+```
+
+### Download `keptn-in-a-box.sh`
+Now let's download the `keptn-in-a-box.sh` file and make it executable.
+```bash
+curl -O https://raw.githubusercontent.com/keptn-sandbox/keptn-in-a-box/release-0.9.2/keptn-in-a-box.sh
+chmod +x keptn-in-a-box.sh
+```
+
+Positive
+: Any π¨customization will take place in the `keptn-in-a-box.sh` file you just downloaded.
+
+## β¦Ώ Dynatrace Integration
+Duration: 1:00
+
+For the best experience we recommend that you enable Dynatrace monitoring.
+
+By **only** providing your Dynatrace credentials, Keptn-in-a-Box will:
+- Install the OneAgent via the [OneAgent Operator](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/deploy-oneagent-k8/) for the Cluster and configure the Dynatrace Integration for Keptn.
+- Configure the Dynatrace Service in Keptn.
+- Deploy and connect to your Cluster the containerized [Dynatrace ActiveGate](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/monitoring/deploy-activegate-in-kubernetes/) for monitoring the [Kubernetes Cluster Utilization](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/monitoring/monitor-kubernetes-openshift-clusters/), [Kubernetes Events](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/monitoring/events/) and [Workloads](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/monitoring/monitor-workloads-kubernetes/).
+
+Positive
+: You have to bring your own Dynatrace tenant
+
+If you don't have a Dynatrace tenant yet, sign up for a [free trial](https://www.dynatrace.com/trial?utm_campaign=keptn) or a [developer account](https://www.dynatrace.com/developer/).
+
+Negative
+: If you don't want to empower your Box with Dynatrace, skip to
Configure your Domain
+
+## β¦Ώ Configure Dynatrace
+Duration: 6:00
+
+### Create a Dynatrace API Token
+Log in to your Dynatrace tenant and go to **Settings > Integration > Dynatrace API**. Then, create a new API token with the following permissions:
+- Access problem and event feed, metrics, and topology
+- Read log content
+- Read configuration
+- Write configuration
+- Capture request data
+- Real user monitoring JavaScript tag management
+- Read metrics
+- Ingest metrics
+- Read entities
+
+Take a look at this screenshot to double check the right token permissions for you.
+
+
+### Create a Dynatrace PaaS Token
+In your Dynatrace tenant, go to **Settings > Integration > Platform as a Service**, and create a new PaaS Token.
+
+### Enter your Dynatrace Credentials
+Now that you have an API-Token and a PaaS-Token, we can enter the credentials.
+In the `keptn-in-a-box.sh` file enter your credentials under the section "Define Dynatrace Environment".
+```bash
+# ---- Define Dynatrace Environment ----
+# Sample: https://{your-domain}/e/{your-environment-id} for managed or https://{your-environment-id}.live.dynatrace.com for SaaS
+TENANT="https://mytenant.live.dynatrace.com"
+PAASTOKEN="myDynatracePaaSToken"
+APITOKEN="myDynatraceApiToken"
+```
+
+That's it! When you run the program, it will detect that you entered your credentials and will download and configure Dynatrace for you.
+
+Negative
+: Be sure that the Box is reachable by your Dynatrace environment and vice versa.
+
+## β Configure your Domain
+Duration: 3:00
+
+### Default configuration (public IP)
+By default Keptn-in-a-Box will `curl ifconfig.me` to get the public IP where it's running and will convert the IP into a magic π§ββοΈ domain with [nip.io](https://nip.io). For example if your Box is public accessible over the IP `116.203.255.68` it will convert it to `116-203-255-68.nip.io`. NIP.IO is a simple wildcard DNS resolution for any IP Address.
+
+Positive
+: Having a magic domain allows you to access as many services as you want with the help of π‘Kubernetes and Istio β΅οΈ. The Kubernetes services will be defined as subdomains (or virtual hosts) and resolved inside your K8s via either Istio Virtual Services or Ingresses.
+
+β
If your box has a public ip, you can go with the **defaults** and leave the `DOMAIN` variable **empty**.
+
+### Configuration for an internal IP
+
+Negative
+: If your box does **not** have a public ip, you'll have to configure the domain so you can access the services from outside the box.
+
+For example, I want to run Keptn-in-a-Box inside my home network and the VM gets the ip `192.168.0.10`. I will convert the IP to a magic domain. This way the requests to any subdomain, for example to [https://keptn.192.168.0.10.nip.io/api](https://keptn.192.168.0.10.nip.io/api) will get resolved to `192.168.0.10` and then kubernetes will take care of forwarding the request internally to the Keptn API service.
+
+Just enter the IP in a magic domain notation as shown below. The ip can contain dashes (-) or dots (.). I just like dashes more, they are prettier π.
+
+```bash
+# ---- Define your Domain ----
+DOMAIN="192-168-0-10.nip.io"
+```
+
+With the above example you'll be able to access the teaser at [http://192-168-0-10.nip.io](http://192-168-0-10.nip.io)
+
+
+
+## β Create a workshop user account
+Duration: 2:00
+
+π¨βπ» A common feature is to use this box for workshops providing access to a guest user. If you spin the instances with a private key and you don't want to share your SSH Key, this feature will create a user, clone the home directory of the `$USER` who runs the program with its folders (such as keptn-examples) and configurations for the `bash` and clients like `helm`, `istioctl`,`kubectl`, `docker` and `keptn`.
+
+The following variables will define the User Account and the SSH password. Set the variables as you desire:
+```bash
+# ---- Workshop User ----
+NEWUSER="dynatrace"
+NEWPWD="dynatrace"
+```
+This functionality is disabled by default but can be independently enabled with any installation bundle. The function flag
+
+```bash
+create_workshop_user=true
+```
+needs to be active and defined after the installation bundles section. More about **functions**, **control flags** and **installationBundles** in the step **select the Installation Bundle**.
+
+Negative
+: β οΈ This function will enable password authentication in `/etc/ssh/sshd_config` and restart the `sshd` service. The workshop user will also be part of the sudoers group.
+
+## Select the installation Bundle
+Duration: 7:00
+
+### β³ Programs logic
+Before selecting the installation Bundle, let's understand how `Keptn-in-a-box.sh` works and what it will do.
+
+[keptn-in-a-box.sh](https://github.com/keptn-sandbox/keptn-in-a-box/blob/master/keptn-in-a-box.sh) is the controller. Here we have been defining our variables. When executing this script, it will download and load the functions defined in [functions.sh](https://github.com/keptn-sandbox/keptn-in-a-box/blob/master/functions.sh). Which **β¨ functions** to execute are controlled by their **π¦control flags**. Now, an **π§©installation Bundle** is the enablement for multiple **control flags**.
+
+### π§©installation Bundles & π¦control flags
+Now that we have understood the delegation of the program's logic and its main components, here is a table of the installation Bundles and their respective enabled flags:
+
+#### π§©installation Bundles
+- installationBundleDemo
+- installationBundleWorkshop
+- installationBundleKeptnOnly
+- installationBundleKeptnQualityGates
+- installationBundlePerformanceAsAService
+- installationBundleAll
+
+#### π¦control Flags
+
+| | | | | | | |
+|-------------------------------------|-----------|------------|--------------|--------------|--------------|-------------|
+|**π¦control flag** |**Demo** |**Workshop** |**KeptnOnly** |**QualityGates**|**PerfAaS** | **All** |
+| update_ubuntu | β
| β
| β
| β
| β
| β
|
+| docker_install | β
| β
| β
| β
| β
| β
|
+| microk8s_install | β
| β
| β
| β
| β
| β
|
+| setup_proaliases | β
| β
| β
| β
| β
| β
|
+| enable_k8dashboard | - | β
| - | β
| β
| β
|
+| enable_registry | - | - | - | - | - | β
|
+| istio_install | β
| β
| β
| - | - | β
|
+| helm_install | β
| β
| β
| - | β
| β
|
+| certmanager_install | - | - | - | - | - | β
|
+| certmanager_enable | - | - | - | - | - | β
|
+| git_deploy | β
| β
| - | - | - | β
|
+| git_migrate | β
| β
| - | - | - | β
|
+| keptn_install | β
| β
| β
| β
| β
| β
|
+| keptn_examples_clone | β
| β
| β
| β
| β
| β
|
+| resources_clone | β
| β
| β
| β
| β
| β
|
+| dynatrace_savecredentials | β
| β
| β
| β
| β
| β
|
+| dynatrace_configure_monitoring | β
| β
| β
| β
| β
| β
|
+| keptndeploy_homepage | β
| β
| β
| β
| β
| β
|
+| keptndemo_cartsload | β
| β
| - | - | - | β
|
+| keptndemo_unleash | β
| β
| - | - | - | β
|
+| keptndemo_unleash_configure | β
| β
| - | - | - | β
|
+| keptndemo_cartsonboard | β
| β
| - | - | - | β
|
+| expose_kubernetes_api | β
| β
| β
| β
| β
| β
|
+| expose_kubernetes_dashboard | - | β
| - | β
| β
| β
|
+| patch_kubernetes_dashboard | - | β
| - | - | - | β
|
+| create_workshop_user | - | - | - | - | - | β
|
+| jenkins_deploy | - | - | - | - | - | β
|
+| keptn_bridge_disable_login | β
| β
| - | - | - | β
|
+| keptn_install_qualitygates | - | - | - | β
| β
| β
|
+
+
+The **dynatrace_** control flags will be disabled if you don't enter your Dynatrace credentials.
+
+### The Default Installation Bundle
+π§©The default installation bundle is **installationBundleDemo**. You can change installation bundles by commenting/uncommenting them in this section.
+```bash
+# ==================================================
+# ----- Select your installation Bundle ----- #
+# ==================================================
+# Uncomment for installing only Keptn
+# installationBundleKeptnOnly
+
+# - Comment out if selecting another bundle
+installationBundleDemo
+
+```
+### Enable or disable specific functionality
+You can also override and enable/disable specific modules after you select the installationBundle. For example, let's enable the workshop account regardless of the installationBundle we selected.
+```bash
+# ==================================================
+# ---- Enable or Disable specific functions ----- #
+# ==================================================
+create_workshop_user=true
+```
+
+### The Installation function
+```bash
+# ==================================================
+# ----- Call the Installation Function ----- #
+# ==================================================
+doInstallation
+```
+At the end of `keptn-in-a-box.sh` we call the installation function. This function is defined at the end of the `functions.sh` file. This function defines the order in which the different modules are to be executed since they have a chronological dependency. For example, in order to onboard an application we first need to have Keptn installed, and Keptn needs Microk8s installed and so on...
+
+## Execute the script
+Duration: 5:00
+
+Yay! now let's see Keptn-in-a-box in action π€!
+
+Now that we understand how it works and we have customized the box as we want, let's trigger the installation.
+
+Run the script with sudo rights and send the process to the background.
+```bash
+sudo bash -c './keptn-in-a-box.sh &'
+```
+
+Why run it in the background and where is the output of the program you say? Well, keptn-in-a-box is actually optimized to be executed for non-interactive shells at the initialization of an instance. This is done programmatically passing the script as [user data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) while creating dynatrace environments and spinning multiple instances for each student. This is achieved with the [Dynatrace Rest Tenant Automation](https://github.com/sergiohinojosa/Dynatrace-REST-Tenant-Automation) program. Yes, we love β€οΈ automation π€and the customization, creation and configuration of environments and instances is done programmatically.
+
+### What happens in the background
+The script will clone the keptn-in-a-box repository in the π home directory of the user that executed it. It will execute the functions marked as `true`. The installation will take between 4 and 10 minutes, depending on the number of features, internet connection speed and computing power available.
+
+### πInspect the script at runtime
+To inspect how the installation is going, type
+```bash
+less +F /tmp/kiab-install.log
+```
+This will open the installation log and read from the input stream. To exit just type `CTRL + C` and then `quit`.
+
+### Installation complete π
+At the end of the installation file you should see something similar
+
+```bash
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |======================================================================
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |============ Installation complete :) ============
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |______________________________________________________________________
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> It took 8 minutes and 10 seconds <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |======================================================================
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |============ Keptn & Kubernetes Exposed Ingress Endpoints ============
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |______________________________________________________________________
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> Below youll find the adresses and the credentials to the exposed services. <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> We wish you a lot of fun in your Autonomous Cloud journey! <-<-<|
+
+NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE
+default homepage-ingress
192-168-0-10.nip.io 127.0.0.1 80, 443 2m48s
+default k8-api-ingress api.kubernetes.192-168-0-10.nip.io 127.0.0.1 80, 443 4m16s
+istio-system istio-ingress * 127.0.0.1 80, 443 4m16s
+istio-system sockshop-ingress carts.sockshop-dev.192-168-0-10.nip.io,carts.sockshop-staging.192-168-0-10.nip.io,carts.sockshop-production.192-168-0-10.nip.io 127.0.0.1 80, 443 12s
+istio-system unleash-ingress unleash.unleash-dev.192-168-0-10.nip.io 127.0.0.1 80, 443 2m22s
+jenkins jenkins-ingress jenkins.192-168-0-10.nip.io 127.0.0.1 80, 443 2m1s
+keptn api-keptn-ingress keptn.192-168-0-10.nip.io 127.0.0.1 80, 443 2m54s
+kube-system k8-dashboard-ingress kubernetes.192-168-0-10.nip.io 127.0.0.1 80, 443 4m16s
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |======================================================================
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |============ Unleash-Server Access ============
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |______________________________________________________________________
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> Username: keptn <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> Password: keptn <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |======================================================================
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |============ Jenkins-Server Access ============
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |______________________________________________________________________
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> Username: keptn <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> Password: keptn <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |======================================================================
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |============ Workshop User Access (SSH Access) ============
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |______________________________________________________________________
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> ssh student@192-168-0-10.nip.io <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |>->-> Password: secr3t <-<-<|
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |======================================================================
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |============ Git-Server Access (SSH Access) ============
+[Keptn-In-A-Box|INFO] [2020-08-04 17:01:23] |______________________________________________________________________
+...
+```
+
+## Access your services and innovate
+Duration: 4:00
+
+Let's say we selected the π§©**installationBundleWorkshop** and we installed keptn-in-a-box in a VM in our home network and the student is `dynatrace` with the password `dynatrace` and the domain is 192-168-0-10.nip.io (for ip 192.168.0.10)
+
+After a shell login
+```bash
+ssh dynatrace@192-168-0-10.nip.io
+```
+### π The home directory
+List the content of the home directory:
+```bash
+ls
+examples keptn-in-a-box snap
+```
+You'll have 3 directories: a clone of the **keptn examples**, a clone of the **keptn-in-a-box** repository and the configuration of microk8s in snap.
+
+### π» Configured clients
+The clients are configured and ready to use `helm`, `istioctl`,`kubectl`, `docker` and `keptn`.
+
+For example type:
+```bash
+keptn status
+Starting to authenticate
+Successfully authenticated
+Using a file-based storage for the key because the password-store seems to be not set up.
+CLI is authenticated against the Keptn cluster https://keptn.192-168-0-10.nip.io/api
+```
+to see that keptn is installed and already configured or type
+
+```bash
+kubectl get all -n sockshop-dev
+```
+to list the cart sample pods and services of the development stage. You'll notice that autocomplete is also enabled.
+
+### π» Available services
+| | |
+|------------------------|--------------------------------------------------------|
+|**Service** | **URL** |
+|Teaser | https://192-168-0-10.nip.io |
+|Kubernetes Dashb. | https://kubernetes.192-168-0-10.nip.io |
+|Kubernetes API | https://api.kubernetes.192-168-0-10.nip.io |
+|Git-Server Repos | https://git.192-168-0-10.nip.io/explore/repos |
+|Git-Server API | https://git.192-168-0-10.nip.io/api/swagger |
+|Keptn API (swagger) | https://keptn.192-168-0-10.nip.io/api/swagger-ui |
+|Keptn Bridge | https://keptn.192-168-0-10.nip.io/bridge |
+|Unleash | https://unleash.unleash-dev.192-168-0-10.nip.io |
+|Carts pipeline overview | https://192-168-0-10.nip.io/pipeline/ |
+
+The Teaser contains links to most of the available services. You can also print the services by showing the configured ingresses in Kubernetes:
+```bash
+kubectl get ing -A
+```
+
+### Continue innovating π
+
+Now that you have your single node Kubernetes Cluster configured and up and running, you are all set to continue your journey to the autonomous cloud. Start typing `kubectl` commands, onboard applications with `keptn`, or maybe create your own **unbreakable pipeline** locally? What about creating your own Keptn Service? Take a look at more [Keptn tutorials](https://tutorials.keptn.sh/).
+
+## Troubleshooting
+
+Negative
+: If you face any issue please don't hesitate to reach out to the Keptn team on [https://slack.keptn.sh](https://slack.keptn.sh), we will be more than glad to help.
+
+
+If you want to verify that the installation finished without errors. Inspect the log with this command:
+```bash
+cat /tmp/kiab-install.log | grep -i error
+```
+If there is no output it means that no error was found in the installation log.
+
+If there is an issue, or maybe there wasn't but you want to start new, here is how we recommend to reboot your Microk8s and everything installed in your single kubernetes cluster.
+
+Search the `functions.sh` file, it should be inside the `keptn-in-a-box` directory. Load it to the shell and run the function `removeMicrok8s`
+
+```bash
+source functions.sh && removeMicrok8s
+```
+This will ask for the sudo password since we are basically removing microk8s and purging its data with the command `sudo snap remove microk8s --purge` . It will also print the directories that were cloned such as the examples or keptn-in-a-box. It's recommended to delete them if you are upgrading versions. Before deleting any directory make sure to save a copy of `keptn-in-a-box.sh` or at least the variables defined in there for example the `DOMAIN` or the **Dynatrace credentials**.
+
+Positive
+: After you have deleted microk8s, you can rerun the installer the same way you did before.
+
+## Advanced features
+
+Positive
+: Keptn in a Box is built in a modular way, which means that you can enable functionality for exploring new features. Let me show you how to do this.
+
+We load the functions in the current shell and we set the flag for all features to be active. Then we can enable module by module.
+
+```bash
+source functions.sh && installationBundleAll
+```
+
+Here are some examples:
+
+### Enable Jenkins
+Now we enable the modules we want to activate, for example we want to deploy Jenkins, we type
+
+```bash
+jenkinsDeploy
+```
+
+### Enable Kubernetes Dashboard and patch its access
+Let's say we want to enable the default Kubernetes Dashboard and disable the login for experimenting and learning.
+```bash
+microk8sEnableDashboard
+exposeK8Services
+patchKubernetesDashboard
+```
+### Enable own GIT server & Migrate Keptn projects to it.
+
+```bash
+DOMAIN=$(kubectl get configmap domain -n default -ojsonpath={.data.domain})
+gitDeploy
+gitMigrate
+```
+
+### Create a(nother) Workshop account.
+Now we want to create a workshop account for Tony. For this we need elevated rights since we will restart the SSH service and enable password authentication via SSH. Since we want to copy the home directory and its properties from the user (e.g. ubuntu) but also need root rights, this needs a couple more steps.
+
+As the normal user (not root) we type the following series of commands.
+First become root and load the functions in the shell (again since it's a new shell)
+```bash
+sudo su
+source functions.sh && installationBundleAll
+```
+Then we tell the script from where we copy the home directory and settings. This is normally the user you were before, which is by default saved in the variable `$SUDO_USER` in interactive shells. We also set the new user id and its password. Finally, we run the function.
+```bash
+USER=$SUDO_USER
+NEWUSER="tony"
+NEWPWD="SuperTony"
+createWorkshopUser
+```
+Now tony is able to login to the KIAB machine via SSH and interact with `kubectl` and `keptn`.
+
+
+{{ snippets/010/community/feedback.md }}
+
+
+
diff --git a/site/tutorials/keptn-in-a-box-011/codelab.json b/site/tutorials/keptn-in-a-box-011/codelab.json
new file mode 100644
index 00000000..708a50ab
--- /dev/null
+++ b/site/tutorials/keptn-in-a-box-011/codelab.json
@@ -0,0 +1,33 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:50+01:00",
+ "id": "keptn-in-a-box-011",
+ "duration": 36,
+ "title": "Keptn in a Box",
+ "authors": "Sergio Hinojosa",
+ "summary": "5 minute installation. All running on MicroK8s with a single line installation. No Kubernetes cluster needed.",
+ "source": "keptn-in-a-box-011_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "microk8s",
+ "dynatrace",
+ "installation",
+ "microkubernetes",
+ "microk8s",
+ "quality-gates",
+ "performance-as-a-service",
+ "automated-operations"
+ ],
+ "tags": [
+ "introduction",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-in-a-box-011"
+}
diff --git a/site/tutorials/keptn-litmus-011.md b/site/tutorials/keptn-litmus-011.md
new file mode 100644
index 00000000..94d91923
--- /dev/null
+++ b/site/tutorials/keptn-litmus-011.md
@@ -0,0 +1,407 @@
+summary: Full Keptn installation on a Kubernetes cluster (GKE recommended)
+id: keptn-litmus-011
+categories: Prometheus,aks,eks,gke,openshift,pks,minikube,quality-gates,litmus
+tags: keptn011x,advanced
+status: Published
+authors: Jürgen Etzlstorfer
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+# Resilience evaluation with LitmusChaos, Prometheus, and Keptn
+
+## Welcome
+Duration: 2:00
+
+In this tutorial, we'll set up a demo application and have it undergo some chaos in combination with load testing. We will then use Keptn quality gates to evaluate the resilience of the application based on SLO-driven quality gates.
+
+
+
+### What we will cover
+- How to create a sample project and create a sample service
+- How to setup quality gates
+- How to add the Litmus integration and execute chaos
+- How to evaluate application resilience
+
+You'll find a time estimate until the end of this tutorial in the right top corner of your screen - this should give you guidance on how much time is needed for each step.
+
+
+In this tutorial, we are going to install Keptn on a Kubernetes cluster.
+
+The full setup that we are going to deploy is sketched in the following image.
+
+
+If you are interested, please have a look at this presentation from Litmus and Keptn maintainers presenting the initial integration.
+
+
+
+
+{{ snippets/010/install/cluster.md }}
+
+{{ snippets/010/install/istio.md }}
+
+{{ snippets/010/install/download-keptnCLI.md }}
+
+{{ snippets/010/install/install-full.md }}
+
+{{ snippets/010/install/configureIstio.md }}
+
+{{ snippets/010/install/authCLI-istio.md }}
+
+## Download demo resources
+Duration: 1:00
+
+Demo resources are prepared for you on Github for a convenient experience. We are going to download them to a local machine so we have them handy.
+
+```
+git clone --branch=release-0.2.0 https://github.com/keptn-sandbox/litmus-service.git --single-branch
+```
+
+Now, let's switch to the directory including the demo resources.
+
+```
+cd litmus-service/test-data
+```
+
+
+## Install Litmus Operator & Chaos CRDs
+Duration: 3:00
+
+1. Let us install LitmusChaos into our Kubernetes cluster. This can be done via `kubectl`.
+
+ ```
+ kubectl apply -f ./litmus/litmus-operator-v1.13.2.yaml
+ ```
+
+1. We are going to create a namespace where we are later executing our chaos experiments.
+
+ ```
+ kubectl create namespace litmus-chaos
+ ```
+
+1. We also need to create the custom resources for the experiments we want to run later, as well as some permissions.
+
+ ```
+ kubectl apply -f ./litmus/pod-delete-ChaosExperiment-CR.yaml
+
+ kubectl apply -f ./litmus/pod-delete-rbac.yaml
+ ```
+
+## Setup Prometheus
+Duration: 3:00
+
+Before we are going to create the project with Keptn, we'll install the Prometheus integration to be ready to fetch the data that is later on needed for the SLO-based quality gate evaluation.
+
+Keptn doesn't install or manage Prometheus and its components. Users need to install Prometheus and Prometheus Alert manager as a prerequisite.
+
+* To install the Prometheus and Alert Manager, execute:
+
+```
+kubectl create ns monitoring
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm install prometheus prometheus-community/prometheus --namespace monitoring
+```
+
+### Execute the following steps to install prometheus-service
+
+* Download the Keptn's Prometheus service manifest
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/release-0.6.0/deploy/service.yaml
+```
+
+* Replace the environment variable value according to the use case and apply the manifest
+
+```
+# Prometheus installed namespace
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" PROMETHEUS_NS="monitoring"
+
+# Setup Prometheus Endpoint
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" PROMETHEUS_ENDPOINT="http://prometheus-server.monitoring.svc.cluster.local:80"
+
+# Alert Manager installed namespace
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" ALERT_MANAGER_NS="monitoring"
+```
+
+* Install Role and Rolebinding to permit Keptn's prometheus-service for performing operations in the Prometheus installed namespace.
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/release-0.6.0/deploy/role.yaml -n monitoring
+```
+
+
+
+### Optional: Verify Prometheus setup in your cluster
+
+* To verify that the Prometheus scrape jobs are correctly set up, you can access Prometheus by enabling port-forwarding for the prometheus-service:
+
+```
+kubectl port-forward svc/prometheus-server 8080:80 -n monitoring
+```
+
+
+## Setup Litmus integration
+Duration: 1:00
+
+Similar to the Prometheus integration, we are now adding the Litmus integration. This integration will be responsible to trigger the experiments with Litmus and listens for `sh.keptn.event.test.triggered` events that are sent from Keptn.
+
+This can be done via the following command.
+
+```
+kubectl apply -f ../deploy/service.yaml
+```
+
+We now have all the integrations installed and connected to the Keptn control plane. Let's move on with setting up a project!
+
+## Create project
+Duration: 1:00
+
+A project in Keptn is the logical unit that can hold multiple (micro)services. Therefore, it is the starting point for each Keptn installation.
+We have already cloned the demo resources from Github, so we can go ahead and create the project.
+
+
+**Recommended:** Create a new project with Git upstream:
+
+To configure a Git upstream for this tutorial, the Git user (`--git-user`), an access token (`--git-token`), and the remote URL (`--git-remote-url`) are required. If a requirement is not met, go to [the Keptn documentation](https://keptn.sh/docs/0.9.0/manage/git_upstream/) where instructions for GitHub, GitLab, and Bitbucket are provided.
+
+Let's define the variables before running the command:
+
+
+
+```
+GIT_USER=gitusername
+GIT_TOKEN=gittoken
+GIT_REMOTE_URL=remoteurl
+```
+
+Now let's create the project using the `keptn create project` command.
+
+```
+keptn create project litmus --shipyard=./shipyard.yaml --git-user=$GIT_USER --git-token=$GIT_TOKEN --git-remote-url=$GIT_REMOTE_URL
+```
+
+
+**Alternatively:** If you don't want to use a Git upstream, you can create a new project without it but please note that this is not the recommended way:
+
+
+```
+keptn create project litmus --shipyard=./shipyard.yaml
+```
+
+
+For creating the project, the tutorial relies on a `shipyard.yaml` file as shown below:
+
+```
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-litmus-chaos"
+spec:
+ stages:
+ - name: "chaos"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+```
+
+In the `shipyard.yaml` shown above, we define a single stage called *chaos* with a single sequence called *delivery*. In this sequence, a *deployment*, *test*, and *evaluation* task is defined (along with some properties). With this, Keptn sets up the environment and makes sure, that tests are triggered after each deployment, and the tests are then evaluated by Keptn quality gates. As we do not have a subsequent stage, we do not need an *approval* or *release* task.
+
+## Create a service
+Duration: 2:00
+
+After creating the project, services can be created for our project.
+For this purpose we need the helm charts as a tar.gz archive. To archive it use following command:
+
+
+```
+tar cfvz ./helloservice/helm.tgz ./helloservice/helm
+```
+
+1. Create the **helloservice** service using the [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/) and [keptn add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) commands:
+
+
+ ```
+ keptn create service helloservice --project=litmus
+ keptn add-resource --project=litmus --service=helloservice --all-stages --resource=./helloservice/helm.tgz --resourceUri=helm/helloservice.tgz
+ ```
+
+1. After creating the service, tests need to be added as basis for quality gates. We are using JMeter tests, as the JMeter service comes "batteries included" with our Keptn installation. Although this could be changed to other testing tools, we are going with JMeter in this tutorial. Let's add some JMeter tests as well as a configuration file to Keptn.
+
+
+ ```
+ keptn add-resource --project=litmus --stage=chaos --service=helloservice --resource=./jmeter/load.jmx --resourceUri=jmeter/load.jmx
+ keptn add-resource --project=litmus --stage=chaos --service=helloservice --resource=./jmeter/jmeter.conf.yaml --resourceUri=jmeter/jmeter.conf.yaml
+ ```
+
+Now each time Keptn triggers the test execution, the JMeter service will pick up both files and execute the tests.
+
+## Configure Quality Gate
+Duration: 2:00
+
+We have not yet added our quality gate, i.e., the evaluation of several SLOs done by Keptn. Let's do this now!
+
+
+1. First, we are going to add an SLI file that holds all service-level indicators we want to evaluate along with their PromQL expressions. Learn more about the concept of [Service-Level Indicators in the Keptn docs](https://keptn.sh/docs/concepts/quality_gates/#what-is-a-service-level-indicator-sli).
+
+ ```
+ keptn add-resource --project=litmus --stage=chaos --service=helloservice --resource=./prometheus/sli.yaml --resourceUri=prometheus/sli.yaml
+ ```
+
+1. Now that we have added our SLIs, let us add the quality gate in terms of an `slo.yaml` which adds objectives for our metrics that have to be satisfied. Learn more about the concept of [Service-Level Objectives in the Keptn docs](https://keptn.sh/docs/concepts/quality_gates/#what-is-a-service-level-objective-slo).
+
+ ```
+ keptn add-resource --project=litmus --stage=chaos --service=helloservice --resource=helloservice/slo.yaml --resourceUri=slo.yaml
+ ```
+
+We've now added our quality gate, let's move on to add the chaos instructions and then run our experiment!
+
+## Adding Litmus Chaos Experiment to Keptn
+Duration: 1:00
+
+We have already installed LitmusChaos on our Kubernetes cluster, but we have not yet added or executed a chaos experiment. Let's do this now!
+
+Let us add the `experiment.yaml` file that holds the chaos experiment instructions. It will be picked up by the LitmusChaos integration of Keptn each time a test is triggered. Therefore, Keptn makes sure that both, JMeter tests as well as LitmusChaos tests, are executed during the `test` task sequence.
+
+```
+keptn add-resource --project=litmus --stage=chaos --service=helloservice --resource=./litmus/experiment.yaml --resourceUri=litmus/experiment.yaml
+```
+
+Great job - the file is added and we can move on!
+
+## Configure Prometheus
+Duration: 2:00
+
+Before we are going to run the experiment, we have to make sure that we have some observability software in place that will actually monitor how the service is behaving under the testing conditions.
+
+1. Let's use the Keptn CLI to configure Prometheus. It will set up a Prometheus deployment and configures it to be ready for Keptn usage.
+
+ ```
+ keptn configure monitoring prometheus --project=litmus --service=helloservice
+ ```
+
+1. Next, we are going to add a `blackbox-exporter` for Prometheus that is able to observe our service under test from the outside, i.e., as a blackbox.
+
+ ```
+ kubectl apply -f ./prometheus/blackbox-exporter.yaml
+ kubectl apply -f ./prometheus/prometheus-server-conf-cm.yaml -n monitoring
+ ```
+
+1. Finally, restart Prometheus to pick up the new configuration
+
+ ```
+ kubectl delete pod -l app=prometheus-server -n monitoring
+ ```
+
+Now everything is in place, let's run our experiments and evaluate the resilience of our demo application!
+
+## Run experiment
+Duration: 4:00
+
+We are now ready to kick off a new deployment of our test application with Keptn and have it deployed, tested, and evaluated.
+
+1. Let us now trigger the deployment, tests, and evaluation of our demo application.
+
+ ```
+ keptn trigger delivery --project=litmus --service=helloservice --image=jetzlstorfer/hello-server:v0.1.1
+ ```
+
+1. Let's have a look in the Keptn bridge what is actually going on. We can use this helper command to retrieve the URL of our Keptn bridge.
+
+ ```
+ echo http://$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')/bridge
+ ```
+
+ The credentials can be retrieved via the following commands:
+
+
+ ```
+ echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+ echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+ ```
+
+ 
+
+1. We can see that the evaluation failed, but why is that?
+
+ 
+
+1. Let's take a look at the evaluation - click on the *chart* icon in the red evaluation tile.
+
+ 
+
+ We can see that the evaluation failed because both the `probe_duration_ms` as well as the `probe_success_percentage` SLOs did not meet their criteria.
+ Considering the fact that our chaos experiment did delete the pod of our application, we might want to increase the number of replicas that are running to make our application more resilient. Let's do this in the next step.
+
+
+## Increase resilience
+Duration: 3:00
+
+1. Let's do another run of our deployment, tests, and evaluation. But this time, we are increasing the `replicaCount` meaning that we run 3 instances of our application. If one of those get deleted by Litmus, the two others should still be able to serve the traffic.
+This time we are using the `keptn send event` command with an event payload that has been already prepared for the demo (i.e., the `replicaCount` is set to 3).
+
+ ```
+ keptn send event -f helloservice/deploy-event.json
+ ```
+
+1. Let's have a look at the second run. We can see that this time the evaluation was successful.
+
+ 
+
+1. Taking a look at the detailed evaluation results we can see that all probes were successful and did finish within the objectives we have set.
+
+ 
+
+1. If you want, you can now experiment with different SLOs or different `replicaCount` to evaluate the resilience of your application in terms of being responsive when the pod of this application gets deleted. Keptn will make sure that JMeter tests and chaos tests are executed each time you run the experiment.
+
+## Finish
+Duration: 1:00
+
+Congratulations! You have successfully completed this tutorial and evaluated the resilience of a demo microservice application with LitmusChaos and Keptn.
+
+### What we've covered in this tutorial
+
+- We've created a project based on a `shipyard` definition.
+ ```
+ apiVersion: "spec.keptn.sh/0.2.0"
+ kind: "Shipyard"
+ metadata:
+ name: "shipyard-litmus-chaos"
+ spec:
+ stages:
+ - name: "chaos"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+ ```
+
+- We've added the Litmus integration and did a successful run of a sequence with JMeter + LitmusChaos
+
+ 
+
+- We've executed chaos tests and evaluated their impact on our application
+
+ 
+
+- We've increased resilience by adding more instances of our demo application to the game
+ 
+
+
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-litmus-011/codelab.json b/site/tutorials/keptn-litmus-011/codelab.json
new file mode 100644
index 00000000..9a49d5ba
--- /dev/null
+++ b/site/tutorials/keptn-litmus-011/codelab.json
@@ -0,0 +1,34 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:51+01:00",
+ "id": "keptn-litmus-011",
+ "duration": 44,
+ "title": "Resilience evaluation with LitmusChaos, Prometheus, and Keptn",
+  "authors": "Jürgen Etzlstorfer",
+ "summary": "Full Keptn installation on a Kubernetes cluster (GKE recommended)",
+ "source": "keptn-litmus-011_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "prometheus",
+ "aks",
+ "eks",
+ "gke",
+ "openshift",
+ "pks",
+ "minikube",
+ "quality-gates",
+ "litmus"
+ ],
+ "tags": [
+ "advanced",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-litmus-011"
+}
diff --git a/site/tutorials/keptn-multistage-qualitygates-010/codelab.json b/site/tutorials/keptn-multistage-qualitygates-010/codelab.json
index d3c563b2..e31152c2 100644
--- a/site/tutorials/keptn-multistage-qualitygates-010/codelab.json
+++ b/site/tutorials/keptn-multistage-qualitygates-010/codelab.json
@@ -21,7 +21,6 @@
"gke",
"openshift",
"minikube",
- "full-tour",
"quality-gates"
],
"tags": [
diff --git a/site/tutorials/keptn-multistage-qualitygates-011.md b/site/tutorials/keptn-multistage-qualitygates-011.md
new file mode 100644
index 00000000..8a6190dd
--- /dev/null
+++ b/site/tutorials/keptn-multistage-qualitygates-011.md
@@ -0,0 +1,511 @@
+summary: Full Keptn installation on a Kubernetes cluster (GKE recommended)
+id: keptn-multistage-qualitygates-011
+categories: Prometheus,aks,eks,gke,openshift,minikube,full-tour,quality-gates
+tags: keptn011x,advanced
+status: Published
+authors: Gabriel Tanner
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+# Multi-stage delivery with Quality Gates using Prometheus and Podtato-head application
+
+## Welcome
+Duration: 2:00
+
+In this tutorial we'll set up the Podtato-head demo application which will feature different Prometheus metrics and deploy the application using multistage delivery. We will then use Keptn quality gates to evaluate the quality of the application based on SLO-driven quality gates.
+
+### What we will cover
+
+- How to create a sample project and create a sample service
+- How to setup quality gates
+- How to use Prometheus metrics in our SLIs & SLOs
+- How to prevent bad builds of your microservice to reach production
+
+In this tutorial, we are going to install Keptn on a Kubernetes cluster.
+
+The full setup that we are going to deploy is sketched in the following image.
+
+
+
+If you are interested, please have a look at this article that explains the deployment in more detail.
+
+[Modern continuous delivery on Kubernetes for Developers - dev.to](https://dev.to/gabrieltanner/modern-continuous-delivery-on-kubernetes-for-developers-5chf)
+
+{{ snippets/010/install/cluster.md }}
+
+{{ snippets/010/install/istio.md }}
+
+{{ snippets/010/install/download-keptnCLI.md }}
+
+{{ snippets/010/install/install-full.md }}
+
+{{ snippets/010/install/configureIstio.md }}
+
+{{ snippets/010/install/authCLI-istio.md }}
+
+## Download the demo resources
+Duration: 1:00
+
+The demo resources can be found on Github for a convenient experience. Let's clone the project's repository, so we have all the resources needed to get started.
+
+
+```
+git clone https://github.com/cncf/podtato-head.git
+```
+
+Now, let's switch to the directory including the demo resources.
+
+
+```
+cd podtato-head/delivery/keptn
+```
+
+## Create project
+Duration: 1:00
+
+A project in Keptn is the logical unit that can hold multiple (micro)services. Therefore, it is the starting point for each Keptn installation.
+We have already cloned the demo resources from Github, so we can go ahead and create the project.
+
+**Recommended:** Create a new project with Git upstream:
+
+To configure a Git upstream for this tutorial, the Git user (`--git-user`), an access token (`--git-token`), and the remote URL (`--git-remote-url`) are required. If a requirement is not met, go to [the Keptn documentation](https://keptn.sh/docs/0.9.0/manage/git_upstream/) where instructions for GitHub, GitLab, and Bitbucket are provided.
+
+Let's define the variables before running the command:
+
+```
+GIT_USER=gitusername
+GIT_TOKEN=gittoken
+GIT_REMOTE_URL=remoteurl
+```
+
+Now let's create the project using the `keptn create project` command.
+
+```
+keptn create project pod-tato-head --shipyard=./shipyard.yaml --git-user=$GIT_USER --git-token=$GIT_TOKEN --git-remote-url=$GIT_REMOTE_URL
+```
+
+**Alternatively:** If you don't want to use a Git upstream, you can create a new project without it but please note that this is not the recommended way:
+
+
+```
+keptn create project pod-tato-head --shipyard=./shipyard.yaml
+```
+
+For creating the project, the tutorial relies on a `shipyard.yaml` file as shown below:
+
+```
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-sockshop"
+spec:
+ stages:
+ - name: "hardening"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+ - name: "release"
+ - name: "production"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "hardening.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "release"
+```
+
+In the `shipyard.yaml` shown above, we define two stages called *hardening* and *production* with a single sequence called *delivery*. The *hardening* stage defines a *delivery* sequence with a deployment, test, evaluation and release task (along with some other properties) while the *production* stage only includes a deployment and release task. The *production* stage also features a *triggeredOn* properties which defines when the stage will be executed (in this case after the hardening stage has finished the delivery sequence). With this, Keptn sets up the environment and makes sure, that tests are triggered after each deployment, and the tests are then evaluated by Keptn quality gates. Keptn performs a blue/green deployment (i.e., two deployments simultaneously with routing of traffic to only one deployment) and triggers a performance test in the hardening stage. Once the tests complete successfully, the deployment moves into the production stage using another blue/green deployment.
+
+## Create a service
+Duration: 2:00
+
+After creating the project, we can continue by onboarding the *helloservice* as a service to your project using the `keptn create service` and `keptn add-resource` commands. You need to pass the project where you want to create the service, as well as the Helm chart of the service.
+For this purpose we need the helm charts as a tar.gz archive. To archive it use following command:
+
+
+```
+tar cfvz ./helm-charts/helloservice.tgz ./helm-charts/helloservice
+```
+
+Then the service can be created:
+
+```
+keptn create service helloservice --project="pod-tato-head"
+keptn add-resource --project="pod-tato-head" --service=helloservice --all-stages --resource=./helm-charts/helloservice.tgz --resourceUri=helm/helloservice.tgz
+```
+
+After onboarding the service, tests (i.e., functional- and performance tests) need to be added as basis for quality gates. We are using JMeter tests, as the JMeter service comes "batteries included" with our Keptn installation.
+
+
+```
+keptn add-resource --project=pod-tato-head --stage=hardening --service=helloservice --resource=jmeter/load.jmx --resourceUri=jmeter/load.jmx
+keptn add-resource --project=pod-tato-head --stage=hardening --service=helloservice --resource=jmeter/jmeter.conf.yaml --resourceUri=jmeter/jmeter.conf.yaml
+```
+
+Now each time Keptn triggers the test execution, the JMeter service will pick up both files and execute the tests.
+
+## Deploy first build with Keptn
+Duration: 4:00
+
+We are now ready to kick off a new deployment of our test application with Keptn and have it deployed, tested, and evaluated.
+
+1. Let us now trigger the deployment, tests, and evaluation of our demo application.
+
+
+ ```
+ keptn trigger delivery --project="pod-tato-head" --service=helloservice --image="gabrieltanner/hello-server" --tag=v0.1.1
+ ```
+
+
+
+1. Let's have a look in the Keptn bridge what is actually going on. We can use this helper command to retrieve the URL of our Keptn bridge.
+
+
+ ```
+ echo http://$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')/bridge
+ ```
+
+ The credentials can be retrieved via the following commands:
+
+
+ ```
+ echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+ echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+ ```
+
+ 
+
+1. **Optional:** Verify the pods that should have been created for the helloservice
+
+
+ ```
+ kubectl get pods --all-namespaces | grep helloservice
+ ```
+
+ ```
+ pod-tato-head-hardening helloservice-primary-5f779966f9-vjjh4 2/2 Running 0 4m55s
+ pod-tato-head-production helloservice-primary-5f779966f9-kbhz5 2/2 Running 0 2m52s
+ ```
+
+## View helloservice
+
+You can get the URL for the helloservice with the following commands in the respective namespaces:
+
+Hardening:
+
+
+```
+echo http://helloservice.pod-tato-head-hardening.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+```
+
+Production:
+
+
+```
+echo http://helloservice.pod-tato-head-production.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+```
+
+Navigating to the URLs should result in the following output:
+
+
+
+## Setup Prometheus Monitoring
+Duration: 3:00
+
+After creating a project and service, you can set up Prometheus monitoring and configure scrape jobs using the Keptn CLI.
+
+Keptn doesn't install or manage Prometheus and its components. Users need to install Prometheus and Prometheus Alert manager as a prerequisite.
+
+* To install the Prometheus and Alert Manager, execute:
+
+```
+kubectl create ns monitoring
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm install prometheus prometheus-community/prometheus --namespace monitoring
+```
+
+### Execute the following steps to install prometheus-service
+
+* Download the Keptn's Prometheus service manifest
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/release-0.7.1/deploy/service.yaml
+```
+
+* Replace the environment variable value according to the use case and apply the manifest
+
+```
+# Prometheus installed namespace
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" PROMETHEUS_NS="monitoring"
+
+# Setup Prometheus Endpoint
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" PROMETHEUS_ENDPOINT="http://prometheus-server.monitoring.svc.cluster.local:80"
+
+# Alert Manager installed namespace
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" ALERT_MANAGER_NS="monitoring"
+```
+
+* Install Role and Rolebinding to permit Keptn's prometheus-service for performing operations in the Prometheus installed namespace.
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/release-0.7.1/deploy/role.yaml -n monitoring
+```
+
+
+
+
+* Execute the following command to install Prometheus and set up the rules for the *Prometheus Alerting Manager*:
+
+```
+keptn configure monitoring prometheus --project=pod-tato-head --service=helloservice
+```
+
+
+
+
+### Optional: Verify Prometheus setup in your cluster
+
+* To verify that the Prometheus scrape jobs are correctly set up, you can access Prometheus by enabling port-forwarding for the prometheus-service:
+
+```
+kubectl port-forward svc/prometheus-server 8080:80 -n monitoring
+```
+
+Prometheus is then available on [localhost:8080/targets](http://localhost:8080/targets) where you can see the targets for the service:
+
+
+### Setup Prometheus SLI provider
+
+During the evaluation of a quality gate, the Prometheus provider is required that is implemented by an internal Keptn service, the *prometheus-service*. This service will _fetch the values_ for the SLIs that are referenced in an SLO configuration file.
+
+We are going to add the configuration for our SLIs in terms of an SLI file that maps the _name_ of an indicator to a PromQL statement how to actually query it.
+
+
+```
+keptn add-resource --project=pod-tato-head --stage=hardening --service=helloservice --resource=prometheus/sli.yaml --resourceUri=prometheus/sli.yaml
+```
+
+For your information, the contents of the file are as follows:
+
+```
+---
+spec_version: '1.0'
+indicators:
+ http_response_time_seconds_main_page_sum: sum(rate(http_server_request_duration_seconds_sum{method="GET",route="/",status_code="200",job="$SERVICE-$PROJECT-$STAGE-canary"}[$DURATION_SECONDS])/rate(http_server_request_duration_seconds_count{method="GET",route="/",status_code="200",job="$SERVICE-$PROJECT-$STAGE-canary"}[$DURATION_SECONDS]))
+ http_requests_total_sucess: http_requests_total{status="success"}
+ go_routines: go_goroutines{job="$SERVICE-$PROJECT-$STAGE"}
+ request_throughput: sum(rate(http_requests_total{status="success"}[$DURATION_SECONDS]))
+```
+
+## Set up the quality gate
+Duration: 4:00
+
+Keptn requires a performance specification for the quality gate. This specification is described in a file called `slo.yaml`, which specifies a Service Level Objective (SLO) that should be met by a service. To learn more about the *slo.yaml* file, go to [Specifications for Site Reliability Engineering with Keptn](https://github.com/keptn/spec/blob/master/service_level_objective.md).
+
+Activate the quality gates for the helloservice. Therefore, navigate to the `delivery/keptn` folder and upload the `slo.yaml` file using the [add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) command:
+
+
+
+```
+keptn add-resource --project=pod-tato-head --stage=hardening --service=helloservice --resource=slo.yaml --resourceUri=slo.yaml
+```
+
+This will add the `slo.yaml` file to your Keptn - which is the declarative definition of a quality gate. Let's take a look at the file contents:
+
+```
+---
+spec_version: '0.1.0'
+comparison:
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ aggregate_function: avg
+objectives:
+ - sli: http_response_time_seconds_main_page_sum
+ pass:
+ - criteria:
+ - "<=1"
+ warning:
+ - criteria:
+ - "<=0.5"
+ - sli: request_throughput
+ pass:
+ - criteria:
+ - "<=+100%"
+ - ">=-80%"
+ - sli: go_routines
+ pass:
+ - criteria:
+ - "<=100"
+total_score:
+ pass: "90%"
+ warning: "75%"
+```
+
+## Deploying artifact with quality gates
+Duration: 3:00
+
+You can now deploy another artifact and see the quality gates in action.
+
+
+```
+keptn trigger delivery --project="pod-tato-head" --service=helloservice --image="gabrieltanner/hello-server" --tag=v0.1.1
+```
+
+
+
+After sending the artifact you can see the test results in Keptn Bridge.
+
+
+
+## Deploy a slow build version
+Duration: 5:00
+
+1. Use the Keptn CLI to deploy a version of the *helloservice*, which contains an artificial **slowdown of 2 seconds** in each request.
+
+
+ ```
+ keptn trigger delivery --project="pod-tato-head" --service=helloservice --image="gabrieltanner/hello-server" --tag=v0.1.2
+ ```
+
+
+
+1. Go ahead and verify that the slow build has reached your `hardening` environment by opening a browser. You can get the URL with this command:
+
+
+ ```
+ echo http://helloservice.pod-tato-head-hardening.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+## Quality gate in action
+Duration: 7:00
+
+After triggering the deployment of the *helloservice* in version v0.1.2, the following behaviour is expected:
+
+* **Hardening stage:** In this stage, version v0.1.2 will be deployed and the performance test starts to run for about 10 minutes. After the test is completed, Keptn triggers the test evaluation and identifies the slowdown. Consequently, a roll-back to version v0.1.1 in this stage is conducted and the promotion to production is not triggered.
+
+
+* **Production stage:** The slow version is **not promoted** to the production stage because of the active quality gate in place. Thus, still version v0.1.1 is expected to be in production.
+ - To verify, navigate to:
+
+ ```
+ echo http://helloservice.pod-tato-head-production.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+## Verify the quality gate in Keptn's Bridge
+Duration: 3:00
+
+Take a look in the Keptn's bridge and navigate to the last deployment. You will find a quality gate evaluation that got a `fail` result when evaluating the SLOs of our *helloservice* microservice. Thanks to this quality gate the slow build won't be promoted to production but instead automatically rolled back.
+
+To verify, the [Keptn's Bridge](https://keptn.sh/docs/0.10.x/reference/bridge/) shows the deployment of v0.1.2 and then the failed test in hardening including the roll-back.
+
+
+
+Here you can see that some of your defined test cases (for example, the response time) failed because you deployed a slow build that is not suitable for production. Once the test fails, the deployment will not be promoted to production and the hardening stage will return to its original state.
+
+## Finish
+Duration: 1:00
+
+Thanks for taking a full tour through Keptn!
+Although Keptn has even more to offer, this tutorial should have given you a good overview of what you can do with Keptn.
+
+### What we've covered
+
+- We have created a sample project with the Keptn CLI and set up a multi-stage delivery pipeline with the `shipyard` file
+
+ ```
+ apiVersion: "spec.keptn.sh/0.2.0"
+ kind: "Shipyard"
+ metadata:
+ name: "shipyard-sockshop"
+ spec:
+ stages:
+ - name: "hardening"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+ - name: "release"
+ - name: "production"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "hardening.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "release"
+ ```
+
+- We have set up quality gates based on service level objectives in our `slo` file
+ ```
+ ---
+ spec_version: '0.1.0'
+ comparison:
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ aggregate_function: avg
+ objectives:
+ - sli: http_response_time_seconds_main_page_sum
+ pass:
+ - criteria:
+ - "<=1"
+ warning:
+ - criteria:
+ - "<=0.5"
+ - sli: request_throughput
+ pass:
+ - criteria:
+ - "<=+100%"
+ - ">=-80%"
+ - sli: go_routines
+ pass:
+ - criteria:
+ - "<=100"
+ total_score:
+ pass: "90%"
+ warning: "75%"
+ ```
+
+
+- We have tested our quality gates by deploying a bad build to our cluster and verified that Keptn quality gates stopped them.
+ 
+
+
+{{ snippets/010/integrations/gettingStarted.md }}
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-multistage-qualitygates-011/codelab.json b/site/tutorials/keptn-multistage-qualitygates-011/codelab.json
new file mode 100644
index 00000000..e91a01a5
--- /dev/null
+++ b/site/tutorials/keptn-multistage-qualitygates-011/codelab.json
@@ -0,0 +1,32 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:52+01:00",
+ "id": "keptn-multistage-qualitygates-011",
+ "duration": 57,
+ "title": "Multi-stage delivery with Quality Gates using Prometheus and Podtato-head application",
+ "authors": "Gabriel Tanner",
+ "summary": "Full Keptn installation on a Kubernetes cluster (GKE recommended)",
+ "source": "keptn-multistage-qualitygates-011_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "prometheus",
+ "aks",
+ "eks",
+ "gke",
+ "openshift",
+ "minikube",
+ "quality-gates"
+ ],
+ "tags": [
+ "advanced",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-multistage-qualitygates-011"
+}
diff --git a/site/tutorials/keptn-public-demo-010.md b/site/tutorials/keptn-public-demo-010.md
index 4007e79e..54b2a36d 100644
--- a/site/tutorials/keptn-public-demo-010.md
+++ b/site/tutorials/keptn-public-demo-010.md
@@ -1,7 +1,7 @@
summary: Explore Keptn on our hosted demo cluster - no installation needed.
id: keptn-public-demo-010
categories: prometheus,quality-gates,k3s
-tags: keptn010x,introduction
+tags: keptn010x,introduction,quickstart
status: Published
authors: JΓΌrgen Etzlstorfer
Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
diff --git a/site/tutorials/keptn-public-demo-010/codelab.json b/site/tutorials/keptn-public-demo-010/codelab.json
index 5ebeb582..705ecd4b 100644
--- a/site/tutorials/keptn-public-demo-010/codelab.json
+++ b/site/tutorials/keptn-public-demo-010/codelab.json
@@ -17,7 +17,8 @@
"category": [
"prometheus",
"quality-gates",
- "k3s"
+ "k3s",
+ "quickstart"
],
"tags": [
"introduction",
diff --git a/site/tutorials/keptn-public-demo-011.md b/site/tutorials/keptn-public-demo-011.md
new file mode 100644
index 00000000..7181505f
--- /dev/null
+++ b/site/tutorials/keptn-public-demo-011.md
@@ -0,0 +1,40 @@
+summary: Explore Keptn on our hosted demo cluster - no installation needed.
+id: keptn-public-demo-011
+categories: prometheus,quality-gates,k3s
+tags: keptn011x,introduction,quickstart
+status: Published
+authors: JΓΌrgen Etzlstorfer
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+
+# Explore Keptn without installation
+
+## Explore Keptn
+Duration: 5:00
+
+
+
+ We are providing a hosted Keptn installation that you can explore without installing anything. Head over to [keptn.public.demo.keptn.sh](https://keptn.public.demo.keptn.sh/) and have a look around to explore 4 different projects that we are currently hosting:
+
+### Access the demos via [keptn.public.demo.keptn.sh](https://keptn.public.demo.keptn.sh/)
+
+Please find a short description of the projects to explore:
+
+### 1. **Litmus**
+Explore this project that orchestrates load tests along with chaos experiments to evaluate the resilience of applications. Learn more about this use case in the blog series ([part 1](https://medium.com/keptn/evaluating-kubernetes-resiliency-with-keptn-and-litmuschaos-66bdfb35cbdd?source=friends_link&sk=86b269ad3cec917ba01076328a20e914f), [part 2](https://medium.com/keptn/part-2-evaluating-application-resiliency-with-keptn-and-litmuschaos-use-case-and-demo-f43b264a2294?source=friends_link&sk=9a6810624fb5c85822c9e9484678722c)), in our [Keptn user group presentation](https://keptn.sh/resources/integrations/#evaluating-the-resiliency-of-your-microservices-with-litmuschaos-tests-and-keptn), and replicate this setup via our [tutorial](https://tutorials.keptn.sh/tutorials/keptn-litmus-010/index.html).
+
+### 2. Podtatohead
+Explore the 2-stage demo with the CNCF podtatohead application that has been onboarded. The application is deployed twice per day, with one fast and one slower build. Explore how the Keptn quality gates prevent the slower run to reach production, based on data from Prometheus. Please have a look at the [dedicated tutorial](https://tutorials.keptn.sh/tutorials/keptn-multistage-qualitygates-010/index.html) to set this up yourself.
+### 3. Sockshop
+
+Our famous Sockshop application with a 3-stage environment and quality gates using data from Dynatrace. Each day 3 builds try to make it into production, but only two versions are stable enough to pass the quality gates. You can set up this demo yourself by following the Sockshop tutorial using either [Prometheus](https://tutorials.keptn.sh/tutorials/keptn-full-tour-prometheus-010/index.html) or [Dynatrace](https://tutorials.keptn.sh/tutorials/keptn-full-tour-dynatrace-010/index.html).
+
+### 4. Unleash
+This project holds the Unleash feature toggle framework that is used in our demos for auto-remediating production issues by switching feature flags. Learn more in our [full tours](https://tutorials.keptn.sh/tutorials/keptn-full-tour-dynatrace-010/index.html) about how to set it up yourself.
+
+
+### Your favorite project
+What is your favorite project? Let us know via [Twitter](https://twitter.com/keptnProject) or in our [Slack channel](https://slack.keptn.sh).
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-public-demo-011/codelab.json b/site/tutorials/keptn-public-demo-011/codelab.json
new file mode 100644
index 00000000..4dcd54d0
--- /dev/null
+++ b/site/tutorials/keptn-public-demo-011/codelab.json
@@ -0,0 +1,29 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:53+01:00",
+ "id": "keptn-public-demo-011",
+ "duration": 5,
+ "title": "Explore Keptn without installation",
+ "authors": "JΓΌrgen Etzlstorfer",
+ "summary": "Explore Keptn on our hosted demo cluster - no installation needed.",
+ "source": "keptn-public-demo-011.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "prometheus",
+ "quality-gates",
+ "k3s",
+ "quickstart"
+ ],
+ "tags": [
+ "introduction",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-public-demo-011"
+}
diff --git a/site/tutorials/keptn-public-demo-08.md b/site/tutorials/keptn-public-demo-08.md
index 71c6a42e..4c507b95 100644
--- a/site/tutorials/keptn-public-demo-08.md
+++ b/site/tutorials/keptn-public-demo-08.md
@@ -1,7 +1,7 @@
summary: Explore Keptn on our hosted demo cluster - no installation needed.
id: keptn-public-demo-08
categories: prometheus,quality-gates,k3s
-tags: keptn08x,introduction
+tags: keptn08x,introduction,quickstart
status: Published
authors: JΓΌrgen Etzlstorfer
Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
diff --git a/site/tutorials/keptn-public-demo-08/codelab.json b/site/tutorials/keptn-public-demo-08/codelab.json
index 8225f2e1..d2b05d58 100644
--- a/site/tutorials/keptn-public-demo-08/codelab.json
+++ b/site/tutorials/keptn-public-demo-08/codelab.json
@@ -17,7 +17,8 @@
"category": [
"prometheus",
"quality-gates",
- "k3s"
+ "k3s",
+ "quickstart"
],
"tags": [
"introduction",
diff --git a/site/tutorials/keptn-public-demo-09.md b/site/tutorials/keptn-public-demo-09.md
index a5990932..5c347560 100644
--- a/site/tutorials/keptn-public-demo-09.md
+++ b/site/tutorials/keptn-public-demo-09.md
@@ -1,7 +1,7 @@
summary: Explore Keptn on our hosted demo cluster - no installation needed.
id: keptn-public-demo-09
categories: prometheus,quality-gates,k3s
-tags: keptn09x,introduction
+tags: keptn09x,introduction,quickstart
status: Published
authors: JΓΌrgen Etzlstorfer
Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
diff --git a/site/tutorials/keptn-public-demo-09/codelab.json b/site/tutorials/keptn-public-demo-09/codelab.json
index 53d653f5..dd972f86 100644
--- a/site/tutorials/keptn-public-demo-09/codelab.json
+++ b/site/tutorials/keptn-public-demo-09/codelab.json
@@ -17,7 +17,8 @@
"category": [
"prometheus",
"quality-gates",
- "k3s"
+ "k3s",
+ "quickstart"
],
"tags": [
"introduction",
diff --git a/site/tutorials/keptn-quality-gates-dynatrace-011-on-k3s.md b/site/tutorials/keptn-quality-gates-dynatrace-011-on-k3s.md
new file mode 100644
index 00000000..6db2a421
--- /dev/null
+++ b/site/tutorials/keptn-quality-gates-dynatrace-011-on-k3s.md
@@ -0,0 +1,436 @@
+summary: 5 minute installation. All running on K3s with a single line installation. No Kubernetes cluster needed.
+id: keptn-quality-gates-dynatrace-011-on-k3s
+categories: Dynatrace,k3s,quality-gates
+tags: keptn011x,introduction
+status: Published
+authors: Andreas Grabner
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+# Keptn Quality Gates with Dynatrace using k3s
+
+## Welcome
+Duration: 2:00
+
+Watch the intro in this YouTube video to see what this tutorial includes:
+
+
+If you have tools that deploy your applications and then run tests against those you have done the hard work already. *Keptn's Quality Gates* help you to automate the evaluation of your test results and the monitoring data captured during your tests. Keptn does this by embracing the concept of Service Level Indicators (SLIs) and Service Level Objectives (SLOs). Essentially SLIs are metrics such as Response Time, Throughput, Error Rate, Number of Database Calls, Time spent in external service calls, ... and SLOs define which objective you have for these SLIs to consider your service to be adhering to what you expect, e.g: Response Time of Login should be faster than 200ms or Login should not make more than 1 database query.
+Keptn didn't come up with these concepts. They have been around for a while and made very popular thanks to the work that Google did around [Site Reliability Engineering](https://landing.google.com/sre/sre-book/chapters/service-level-objectives)
+
+In this tutorial we teach you how you can use Keptn Quality Gates to automatically analyze important metrics (SLIs) that Dynatrace captures while your system is under load, e.g: during a load test and compare them against your expected behavior (SLOs). This comparison can either be against well defined thresholds, e.g: 200ms response time but can also be a comparison against previous test results, e.g: response time should not get slower than 10% of our previous build.
+
+The real benefit is visualized in the following animation. Keptn Quality Gates help you automate the manual task of analyzing or comparing data on dashboards to determine whether a build meets your quality criteria.
+
+
+
+## Installation of Keptn on k3s
+Duration: 5:00
+
+We have an extensive [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s) tutorial on GitHub that includes setting up k3s, installing Keptn and automatically creating several Keptn projects to demo different use case with Dynatrace.
+In this tutorial we only focus on the Quality Gate use case for Dynatrace. If you want to explore more, check out other tutorials on https://tutorials.keptn.sh or check out the full tutorial details on [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s)
+
+### Prerequisites
+
+**Linux Machine**
+You should have a Linux machine with at least 2vCPUs and 8GB of RAM, e.g: EC2 t3.large.
+If you want to run additional tutorials later, e.g: performance testing, continuous delivery .. then I suggest you get a machine with 8vCPUs and 32GB RAM, e.g: EC2 t3.2xlarge.
+As we will be accessing Keptn that we install on that machine via HTTP and HTTPS make sure that both ports (80 & 443) are allowed by your firewall.
+What we will need is SSH access to this machine and SUDO rights so we can execute our scripts
+
+So - here is our checklist:
+- [x] Large enough Linux Machine
+- [x] Ports 80 & 443 are open
+- [x] SSH access and SUDO rights
+
+**Dynatrace Environment**
+Next thing we need is a Dynatrace environment. If you don't have one - just sign up for a [Dynatrace SaaS Trial](https://dynatrace.com/trial). As this is a tutorial about Quality Gates which means - automating the analysis of data in Dynatrace please make sure that you are installing a Dynatrace OneAgent on at least one machine so that we can get some monitoring data. For more details simply follow the steps that the Dynatrace trial guides you through!
+
+*1: API Token: DT_API_TOKEN*
+For our Keptn installation we need a Dynatrace API Token that Keptn can use to query data as well as push some dashboards.
+For that - please create an API token (via Settings -> Integration -> Dynatrace API) with the following privileges. Make sure you copy that API Token in a safe spot:
+
+
+
+*2: PaaS Token: DT_PAAS_TOKEN*
+We also need a Dynatrace PaaS Token that allows our install script to install a Dynatrace OneAgent on k3s to also monitor that k3s cluster automatically. For that go to Settings -> Integration -> Platform as a Service and create a new token!
+
+*3: Your Dynatrace Tenant Host: DT_TENANT*
+If you have a Dynatrace SaaS (e.g: trial) environment we need the host name, e.g: abc12345.live.dynatrace.com. If you run Dynatrace Managed then we need the host + environment id, e.g: yourmanageddomain.com/e/YOUR-ENV-ID
+
+*4: Your Dynatrace username: OWNER_EMAIL*
+This should be simple. The username you have when logging in to Dynatrace. That is probably your email. We need that because a dashboard will be created automatically for us and every dashboard needs an owner. This is why we need that email!
+
+So - here is our checklist:
+- [x] DT_API_TOKEN
+- [x] DT_PAAS_TOKEN
+- [x] DT_TENANT
+- [x] OWNER_EMAIL
+
+### Preparing our Linux Host
+
+Now that we have all data we can start with the installation
+
+**Installing additional command line tools**
+The installation script of our tutorial needs a couple of tools as those scripts will download some additional files (via curl), will parse some files (jq, yq), will iterate through file system (tree). We will also need git to download a git repo. Here is a list of all these tools and how you could install them if you have yum. For other Linux distributions please check how to install these tools: git, curl, jq, tree, yq
+
+```console
+sudo yum update -y
+sudo yum install git -y
+sudo yum install curl -y
+sudo yum install jq -y
+sudo yum install tree -y
+sudo wget https://github.com/mikefarah/yq/releases/download/v4.2.0/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
+```
+
+**Download (git clone) the tutorial**
+As mentioned, the [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s) is a broad tutorial. While we are only using parts of it we simply download the whole thing locally which also includes the actual installation script. To do that we simply do a git clone and switch to the release-0.9.0 branch of that tutorial:
+
+```console
+git clone https://github.com/keptn-sandbox/keptn-on-k3s
+cd keptn-on-k3s
+git checkout release-0.9.0
+```
+
+### Installing Keptn on k3s for Quality Gates
+
+We are almost ready to run our installation script. The only thing left to do is to export some of our data we prepared earlier (token, endpoints ...) via env-variables so the script can easily pick it up. Additionally to the 4 variables we prepared we also `export LE_STAGE=staging` - this will allow us to get a LetsEncrypt staging certificate so we can use TLS encryption.
+
+Now - here is what you should export - obviously with the values that you have collected:
+```console
+$ export DT_TENANT=abc12345.live.dynatrace.com
+$ export DT_API_TOKEN=YOUR_API_TOKEN
+$ export DT_PAAS_TOKEN=YOUR_PAAS_TOKEN
+$ export OWNER_EMAIL=yourdynatraceuser@yourmail.com
+$ export LE_STAGE=staging
+```
+
+The installation scripts has multiple options.
+
+If you really **JUST want to do quality gates** then please run the following. This will install just Keptn's Control Plane with the Dynatrace integration and the Dynatrace Demo projects. *BE AWARE* there is an option called --provider. If you run on e.g: EC2 then specify aws. If your machine is hosted on GCP then specify gcp. If you just run on a local machine or a VM you can omit that parameter!
+
+```console
+./install-keptn-on-k3s.sh --controlplane --provider aws --with-dynatrace --with-demo dynatrace --letsencrypt --with-gitea
+```
+
+If you want to run **MORE tutorials in the future**, e.g: delivery or performance testing you can switch the --controlplane to --deliveryplane like this
+```console
+./install-keptn-on-k3s.sh --deliveryplane --provider aws --with-dynatrace --with-demo dynatrace --letsencrypt --with-gitea
+```
+
+**Use your own Domain Name**
+By default the installation will use your local IP and a free DNS Resolution service from xip.io to use proper DNS names. That works well but we have learned that it might sometimes be a bit unstable. You can create your own DNS, eg.: using Route53 to point to your public IP and then pass this domain name via the parameter --fqdn. Here would be the installation option to install the quality gates with a custom domain!
+```console
+./install-keptn-on-k3s.sh --controlplane --provider aws --with-dynatrace --with-demo dynatrace --letsencrypt --with-gitea --fqdn yourdomain.abc
+```
+
+
+At the end of the installation the script outputs information about the installation and about each demo that was installed. Important for our tutorial is the output that looks like this as it contains all information on how we can access Keptn:
+```
+#######################################>
+# Keptn Deployment Summary
+#######################################>
+API URL : https://keptn.YOUR.IP.xip.io/api
+Bridge URL: https://keptn.YOUR.IP.xip.io/bridge
+Bridge Username: keptn
+Bridge Password: YOURBRIDGEPASSWORD
+API Token : YOURKEPTNAPITOKEN
+Git Server: http://git.YOUR.IP.xip.io
+Git User: keptn
+Git Password: keptn#R0cks
+```
+
+Additionally take note of the first Dynatrace Demo Summary block which is the block that explains the demo that was installed for Quality Gates
+```
+#######################################>
+# Dynatrace Demo Summary
+#######################################>
+5 Dynatrace Demo projects have been created, the Keptn CLI has been downloaded and configured and a first demo quality gate was already executed.
+
+For the Quality Gate Use case you can do this:
+1: Open the Keptn's Bridge for your Quality Gate Project:
+ Project URL: https://keptn.YOUR.IP.xip.io/bridge/project/dynatrace
+ User / PWD: keptn / YOURBRIDGEPASSWORD
+2: Run another Quality Gate via:
+ keptn trigger evaluation --project=dynatrace --stage=quality-gate --service=demo
+3: Automatically synchronize your Dynatrace monitored services with Keptn by adding the 'keptn_managed' and 'keptn_service:SERVICENAME' tag
+ More details here: https://github.com/keptn-contrib/dynatrace-service#synchronizing-service-entities-detected-by-dynatrace
+```
+
+To validate the installation went fine lets open Keptn's bridge by following the link to the Dynatrace project that was created for us. The url ends with /bridge/project/dynatrace!
+You may notice that the URL ends with xip.io. We are using this free DNS service to leverage DNS names which also allows us to do some traffic routing on different domain names even though everything in the end resolves to your local IP. You will also notice that your browser tells you that the website is not secure even though you are accessing an https endpoint. This is because we created a temporary staging certificate using LetsEncrypt. If you want to use Keptn for production use cases you would need to create your own certificates. For our tutorial it's OK though - you can just tell your browser to continue.
+When you are prompted for username and password simply use bridge username & password that you find in the installation script output.
+
+In the bridge then navigate to the Services screen, expand Demo and click on the first evaluation that should already have been executed!
+
+
+
+## Running another Quality Gate through the CLI
+Duration: 5:00
+
+Our keptn installation is ready to execute another quality gate evaluation.
+As you have seen after the installation we have a project called Dynatrace, it already has a Demo service and the project itself has a stage called quality-gate.
+Now, lets explore two options on how to trigger a quality gate. First is through the [Keptn CLI](https://keptn.sh/docs/0.10.x/reference/cli/), later we also look at the [Keptn API](https://keptn.sh/docs/0.10.x/reference/api/)!
+
+### Execute a Quality Gate through the Keptn CLI
+
+On your linux host the [Keptn CLI](https://keptn.sh/docs/0.10.x/reference/cli/) was downloaded and is already authenticated against your Keptn installation. We can now simply execute the following command which will trigger the quality gate letting Keptn know to evaluate the timeframe of the last 30 minutes.
+
+```
+keptn trigger evaluation --project=dynatrace --stage=quality-gate --service=demo --timeframe=30m
+```
+
+The output will be something like this:
+```
+Warning: could not open KUBECONFIG file: Cannot find file /home/ec2-user/.kube/config
+Starting to trigger evaluation of the service demo in project dynatrace
+ID of Keptn context: b6bf3cfe-f812-4719-b4b2-17760a16bd2c
+```
+
+Now - you can safely ignore the warning. This is just because the keptn CLI tries to access kubectl which is currently not configured to access your k3s cluster. We can fix this through the following export if you want:
+```
+export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
+```
+
+What's important to note is that Keptn is an event-driven system, which means that our quality gate request was confirmed and we received a so-called Keptn context. That's the ID we can use to also query Keptn for the result of that quality gate or its status. This is especially useful when you want to integrate Keptn into other tools.
+
+**Using Labels to identify buildId or other meta data**
+Another very useful option is --labels which allows you to add additional meta data such as buildId, triggeredBy or really any other metadata you want to associate with that quality gate run. This becomes super useful when you trigger quality gates from e.g: your CI such as Jenkins. You can pass in Jenkin Build ID, Jenkins URL, Git Commit ... - all this data will then show up in Keptn as well in the context of a quality gate.
+
+Let's run another quality gate with some labels. Let's use buildId (exactly cased like that). This is a special label as you will see it will be used in the chart as the x-Axis label.
+
+```
+keptn trigger evaluation --project=dynatrace --stage=quality-gate --service=demo --timeframe=30m --labels=buildId=1,triggeredBy=me
+```
+
+When we refresh the bridge you will see your latest quality gate requests and the results:
+
+
+
+
+## Where does the Quality Gate data come from?
+Duration: 5:00
+
+Now - you may wonder: which data is analyzed? Which metrics are extracted? Against which values are they evaluated? Can I change this?
+The Demo service that was created is meant for you to get a quick "out of the box success" to see that Quality Gates with Dynatrace work really well. Let's now explore where the data comes from and how you can create your own quality gates with your own metrics!
+
+Keptn pulls metrics (SLIs=Service Level Indicators) from Dynatrace. The list of SLIs can either be specified in YAML files - or - can be specified through a Dynatrace dashboard. The dashboard option is a bit more convenient to get started with. The YAML option is however better for automating quality gates as part of your GitOps automation as these YAMLs can be specified by your engineers and can live next to their source code.
+The Demo service we have here uses the Dashboard approach and it is a dashboard that is automatically created or updated every time you run a quality gate for the demo service.
+
+**Show me that dashboard!!**
+In the Keptn's bridge when you look at the evaluation result you notice a small icon on the top right that is highlighted here:
+
+
+When you click on it you get to a full screen version of the quality gate result and there you also get a link to the Dynatrace dashboard that was used as shown here:
+
+
+Clicking on that link gets you to the dashboard that is used:
+
+
+You can see that this dashboard has a very specific name: KQG;project=dynatrace;service=demo;stage=quality-gate
+This is the way the Keptn -> Dynatrace integration knows that this is the dashboard that defines which SLIs (=metrics) to analyze and also against which SLOs (=thresholds) to validate them against. If you look close you can see that some of the charts have some special naming such as sli=svc_rt_p95 and also contain pass & warning criteria.
+
+For more information about how you can leverage a Dynatrace dashboard to define your SLIs & SLOs for a Keptn Quality Gate check out [SLIs & SLOs via Dynatrace Dashboard](https://github.com/keptn-contrib/dynatrace-sli-service#slis--slos-via-dynatrace-dashboard)
+
+## Running a Quality Gate through the API
+Duration: 5:00
+
+Another option to trigger a quality gate is through the Keptn APIs. You can find a lot of information about the [Keptn API](https://keptn.sh/docs/0.10.x/reference/api/)
+Let me show you how you can trigger the same 30 minute evaluation using the Keptn API through the Swagger UI Interface.
+
+Click on the top right person icon - this will give you the chance to copy the API Token in your clipboard and also open the Keptn API Swagger UI:
+
+
+The Keptn API provides different API definitions:
+1: Select the "controlPlane" definition from the top right
+2: Click on Authorize, paste your token and authorize with that token
+3: Now scroll down to the *Evaluation* API
+4: Expand it and click on Try Now
+
+Now we can provide the same information as we used for the CLI:
+* project = dynatrace
+* stage = quality-gate
+* service = demo
+
+The evaluation payload contains the time and label information. Just copy paste the following into that text field:
+```
+{
+ "labels": {
+ "buildId": "2",
+ "triggeredBy": "api"
+ },
+ "timeframe": "30m"
+}
+```
+
+Here the screenshot on how that should look like:
+
+
+Then click on Execute. Keptn will now process our request. The Swagger UI also gives you the details on how that same API call can be done via cURL, which is great as you can directly use this for your automation.
+
+Now you can go back to the bridge and validate if the evaluation succeeded!
+
+
+## Enable Quality Gates for a Dynatrace monitored service
+Duration: 10:00
+
+The tutorial so far has shown you quality gates with a demo setting and a dashboard that contains some random metrics. You could now go off and create your own Keptn project, create your own services and then create your own dashboards.
+Another option you have is to leverage the auto-synchronization feature between Dynatrace monitored service entities and the Keptn Project called `Dynatrace`. This feature is explained in detail under [Synchronizing Service Entities detected by Dynatrace](https://github.com/keptn-contrib/dynatrace-service#synchronizing-service-entities-detected-by-dynatrace)
+
+If you have any services monitored with Dynatrace, e.g: Java services, .NET, GO, Python, ... then they would be a great candidate to enable for Quality Gates.
+All we need to do is put two tags on those services as shown in the following screenshot:
+
+
+
+Every 5 minutes the Keptn -> Dynatrace integration queries the Dynatrace Smartscape API to see if there are any monitored services with the tag "keptn_managed". If so, it will automatically add that service to the Dynatrace project in Keptn using the name that is specified in the tag "keptn_service". In the screenshot above that is "simplenode".
+
+So - for this to work you need
+1: Have a project in Keptn called "Dynatrace" -> that came with our tutorial
+2: Tag a service with keptn_managed and keptn_service
+3: Wait up to 5 minutes
+
+Now you are good to go and can run a quality gate for that service. How? Let's use the following CLI:
+
+```
+keptn trigger evaluation --project=dynatrace --stage=quality-gate --service=simplenode --timeframe=30m
+```
+
+The result should look like this:
+
+
+You may say: why only 3 SLIs and not more? Because we don't have a dashboard with the proper name and the default SLI and SLO have been specified just around those 3 metrics.
+You can extend this by either adding your SLIs & SLOs in the YAML files that keptn keeps internally for your project and service - or - you can create a dashboard and tell Keptn to look at your dashboard
+
+## Access Keptn's Configuration Files via Gitea
+Duration: 2:00
+
+Keptn internally keeps all its configuration files in a git repo. The tutorial has installed a web Git service called [Gitea](https://gitea.io/en-us/) and defined upstream projects into that Gitea service so that you can access all keptn files and even modify them. This is a great way to learn more about keptn, all the config files and explore.
+
+The best way to access those repos is by clicking on the links in the keptn project overview page:
+
+
+You can always explore all files read-only. You can also log in with keptn / keptn#R0cks, which lets you modify files, e.g: you could modify the default SLI or SLO yamls that were created when synchronizing your service. Or - you could modify the default dashboard that gets created by Keptn as part of the Demo quality-gate.
+
+Here is a screenshot that shows all files relevant for the quality-gate stage for the Dynatrace project:
+
+
+
+## Quality Gates behind the scenes
+Duration: 2:00
+
+In this last section we want to quickly recap what the tutorial has automatically set-up so you can do it yourself for other projects:
+
+**1: Create new project with a quality-gate shipyard**
+
+It first created a new project called Dynatrace using a specific pre-defined shipyard.yaml
+
+```
+keptn create project dynatrace --shipyard=./shipyard.yaml
+```
+
+Here is the shipyard that was used which includes a step for monaco (=Monitoring as Code) followed by the actual evaluation. If you don't want to use Monaco to e.g: create automated dashboards then you can leave this step out or add other steps, e.g: approval, notifications ...
+
+```yaml
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-quality-gates"
+spec:
+ stages:
+ - name: "quality-gate"
+ sequences:
+ - name: evaluation
+ tasks:
+ - name: monaco
+ - name: evaluation
+```
+
+The important thing about that shipyard is that it contains a stage which contains a sequence with the name `evaluation`. This is important if you want to use the keptn cli for `keptn trigger evaluation` as it will send an event to trigger the sequence called `evaluation`
+
+The simplest shipyard for quality gates can look like this as Keptn by default assumes that an empty stage without sequences will be used for evaluation:
+```yaml
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-quality-gates"
+spec:
+ stages:
+ - name: "quality-gate"
+```
+
+**2: Create a service**
+
+As we have our Dynatrace project with a quality-gate stage we now need to add services. In keptn we therefore use the create service command and for instance add the demo service:
+```console
+keptn create service demo --project=dynatrace
+```
+
+**3: Upload configuration files**
+
+As you have seen when exploring the configuration repository through Gitea there are quite some files uploaded to this project. The most important ones are uploaded on a project level such as the dynatrace/dynatrace.conf.yaml and dynatrace/monaco.conf.yaml. These are configuration files that will be used by the Dynatrace integration to determine whether it should look for a dashboard to retrieve SLI/SLO configuration and also which Monaco (Monitoring as Code) projects to apply.
+
+Those files can be uploaded through the keptn cli, e.g:
+```
+keptn add-resource --project=dynatrace --resource="dynatrace.conf.yaml" --resourceUri="dynatrace/dynatrace.conf.yaml"
+```
+
+To learn more about how to upload files consult the Keptn CLI documentation for [add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/)
+
+## Troubleshooting
+Duration: 0:00
+
+Here are some troubleshooting tips
+
+### Lost your Keptn's bridge username / password
+
+The credentials can be retrieved via the following commands:
+
+```
+echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+```
+
+### Sometimes get browser errors when accessing bridge
+
+This is possible as we are using xip.io as a free service to resolve DNS. If that temporarily fails, it can impact either your access to the bridge or the API. It can also have an impact on Keptn itself. In that case - just retry!
+To make this more stable - feel free to leverage your own DNS: point it to the public IP of your host and install the tutorial with the option --fqdn yourdomain.abc
+
+### Installation of tutorial failed
+
+Please contact us through the [Keptn slack workspace](https://slack.keptn.sh). We have a channel called #keptn-docs where you can directly ping us about issues on the tutorials
+
+
+## Uninstall
+Duration: 0:00
+
+If you are done and want to uninstall the tutorial you can simply execute the following command
+```console
+k3s-uninstall.sh
+```
+
+This will delete the k3s cluster including keptn.
+If you also want to delete the git repository content you can also delete the local directory keptn-on-k3s
+
+## Other tutorials
+
+If you want to explore other tutorials that are possible with this keptn on k3s demo then have a look at more tutorials on https://tutorials.keptn.sh (we are still developing some of them) - or explore the content on [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s)
+
+## Finish
+Duration: 0:00
+
+In this tutorial, you have learned how to use Keptn to validate the quality of your deployments by evaluating a set of SLIs (Service Level Indicators) against your SLOs (Service Level Objectives) for a specified timeframe! The overall goal is to use this capability to automate the manual evaluation of metrics through dashboards.
+
+As you have now learned how to setup Keptn for pulling metrics out of Dynatrace the next step is that you do this with metrics that are important for your services, applications, processes and hosts. Think about how you can convert your Dynatrace dashboards into SLIs and SLOs and then have Keptn automate the analysis for you:
+
+
+
+
+### What we've covered
+
+- Install Keptn and setup a Keptn Project for Quality Gate evaluation
+- Prepare Dynatrace to act as a data source for Quality Gate evaluation
+- Learn how to define and use service-level indicators (SLIs) and service-level objectives (SLOs)
+- How to trigger a Keptn Quality Gate evaluation using the CLI and the API
+- How to use the Keptn's Bridge to inspect your Quality Gate Results
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-quality-gates-dynatrace-011-on-k3s/codelab.json b/site/tutorials/keptn-quality-gates-dynatrace-011-on-k3s/codelab.json
new file mode 100644
index 00000000..a134344f
--- /dev/null
+++ b/site/tutorials/keptn-quality-gates-dynatrace-011-on-k3s/codelab.json
@@ -0,0 +1,28 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:53+01:00",
+ "id": "keptn-quality-gates-dynatrace-011-on-k3s",
+ "duration": 36,
+ "title": "Keptn Quality Gates with Dynatrace using k3s",
+ "authors": "Andreas Grabner",
+ "summary": "5 minute installation. All running on K3s with a single line installation. No Kubernetes cluster needed.",
+ "source": "keptn-quality-gates-dynatrace-011-on-k3s_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "dynatrace",
+ "k3s",
+ "quality-gates"
+ ],
+ "tags": [
+ "introduction",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-quality-gates-dynatrace-011-on-k3s"
+}
diff --git a/site/tutorials/keptn-quality-gates-prometheus-011.md b/site/tutorials/keptn-quality-gates-prometheus-011.md
new file mode 100644
index 00000000..cc3ee1e1
--- /dev/null
+++ b/site/tutorials/keptn-quality-gates-prometheus-011.md
@@ -0,0 +1,319 @@
+summary: 5 minute installation. All running on K3s with a single line installation. No Kubernetes cluster needed.
+level: beginner
+id: keptn-quality-gates-prometheus-011
+categories: prometheus,quality-gates,k3s
+tags: keptn011x,introduction
+status: Published
+authors: Jürgen Etzlstorfer
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+
+
+# Keptn Quality Gates with Prometheus
+
+## Welcome
+Duration: 2:00
+
+
+
+Let's say you want to use your existing tools to deploy and test your applications - you can still use *Keptn's Quality Gates* for the evaluation of Service Level Objectives (SLOs).
+
+*A brief recap of SLO and SLI:* A Service Level Objective (SLO) is a target value or range of values for a service level that is measured by a Service Level Indicator (SLI). An SLI is a carefully defined quantitative measure of some aspect of the level of service that is provided.
+
+Positive
+: For more information about SLO and SLI, please take a look at [Quality Gates with Keptn](https://keptn.sh/docs/concepts/quality_gates/).
+
+### What we will cover in this tutorial
+
+- Set up a demo with Prometheus and Keptn
+- Trigger Keptn Quality Gate evaluations
+- Manage SLOs via Keptn
+
+
+Here is what we are going to do in the course of this demo:
+
+
+
+## Prerequisites
+Duration: 2:00
+
+Everything you need to run for this tutorial is provided on Github and you'll run the whole tutorial (Keptn installation + demos) in a VM. This is not recommended for production use-cases, but it will give you a quick and easy start to explore Keptn.
+
+### What you need
+
+You should have a Linux machine with at least 2vCPUs and 8GB of RAM, e.g: EC2 t3.large. This tutorial was tested on an EC2 with Amazon Linux, size t3.large instance; if you can bring this one please do so as it will give you a smooth experience.
+If you want to run additional tutorials later, e.g: performance testing, continuous delivery ... then we suggest you get a machine with 8vCPUs and 32GB RAM, e.g: EC2 t3.2xlarge.
+As we will be accessing Keptn that we install on that machine via HTTP and HTTPS make sure that both ports (80 & 443) are allowed by your firewall.
+What we will need is SSH access to this machine and sudo rights so we can execute our scripts.
+
+Here is our checklist:
+
+✅ Large enough Linux VM: EC2 with Amazon Linux, at least of size t3.large recommended
+✅ Ports 22, 80 & 443 are open
+✅ SSH access and sudo rights
+
+In the following steps, we are making use of resources that are provided on GitHub. If you want to explore them, have a look at our repo [Keptn on k3s](https://github.com/keptn-sandbox/keptn-on-k3s)!
+
+## Set up the tutorial
+Duration: 2:00
+
+Login to your Linux VM and prepare it for this tutorial by executing a couple of pre-requisite steps.
+Let's prepare the machine with all needed utilities.
+
+```
+sudo yum update -y
+sudo yum install git -y
+sudo yum install curl -y
+sudo yum install jq -y
+sudo yum install tree -y
+sudo wget https://github.com/mikefarah/yq/releases/download/v4.2.0/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
+
+git clone https://github.com/keptn-sandbox/keptn-on-k3s
+cd keptn-on-k3s
+git checkout release-0.9.0
+```
+
+This will install some tools that we are going to need for the tutorial and already download the tutorial files.
+
+## Install Keptn + demo
+Duration: 3:00
+
+Now that we have prepared our machine for the tutorial and also downloaded the needed files, let us go ahead and start the installation script.
+
+We recommend that you are using the following command. However, the Keptn-on-K3s project is in general customizable and can be executed with other parameters as well. Please have a look at the [Keptn-on-K3s Github repo](https://github.com/keptn-sandbox/keptn-on-k3s) for full instructions of all capabilities. As said, for this tutorial we are going to install it with the following command.
+
+Negative
+: Please note that we are assuming here that you are running an EC2 instance in the AWS cloud. If that is not the case, please change the "provider" flag to fit your environment.
+
+We need to provide an email address to create a certificate via Let's Encrypt. You can do so by entering your own email address or just copy the line below.
+```
+export CERT_EMAIL=mykeptntutorial@certemail.com
+```
+
+Let's go ahead and install Keptn and the demo:
+
+```
+./install-keptn-on-k3s.sh --controlplane --provider aws --with-prometheus --with-demo prometheus --with-gitea --letsencrypt --disable-bridge-auth --use-nip
+```
+
+In my tests, the full installation and setup usually takes less than 7 minutes to finish.
+
+While the script is running, let us take a look at the installation flags that we've chosen:
+
+- `--controlplane`: we only install the Keptn control plane as we do not need additional execution plane services (as we only run the Keptn quality gates use case)
+- `--provider aws`: with this flag we configure the installation for AWS
+- `--with-prometheus`: we will have Prometheus + the Prometheus integrations for Keptn installed
+- `--with-demo prometheus`: we will have a demo application ready to be used with Prometheus installed
+- `--with-gitea`: we will have Gitea - a git user interface in the browser - installed
+- `--letsencrypt`: we will have a Let's Encrypt certificate being issued for us installed
+- `--disable-bridge-auth`: we will disable the authentication for Keptn Bridge to be able to access it without password (not recommended for production use cases)
+- `--use-nip`: we will use nip.io as a free wildcard DNS service to expose Keptn via a public hostname
+
+For a full list of installation flags, please have a look at the [keptn-on-k3s GitHub repo](https://github.com/keptn-sandbox/keptn-on-k3s).
+
+Let's move on to the next step while the installation is running!
+
+
+## While the installation is running
+Duration: 2:00
+
+While the installation is running, let's have a look what will be installed.
+
+- **Keptn control plane**: all services of the Keptn control plane are being installed to allow for the quality gate evaluation. This includes also the Keptn Bridge as the UI of Keptn that we are going to use to have a look at the evaluations.
+
+- **Prometheus**: In this demo we are using Prometheus as our monitoring tool and datastore. Therefore, Prometheus will be installed along with the [Prometheus integrations](https://github.com/keptn-contrib/prometheus-service) for Keptn.
+
+- **Podtatohead demo application**: We are going to use the CNCF [podtatohead demo application](https://github.com/cncf/podtato-head) for this demo with a custom-built image that serves our purpose. The demo application will be ready for you after the installation.
+
+- **Gitea**: A user interface to browse the Keptn managed Git repo will be installed.
+
+- **First run of a quality gate**: To make use of the demo application, Prometheus, and the Keptn quality gates, a first quality gate evaluation will already be triggered. This allows us to already take a look at the first evaluation upon installation.
+
+
+Once the installation is finished, the final output should look similar to this:
+
+```
+#######################################>
+# Keptn Deployment Summary
+#######################################>
+API URL : https://keptn.xxx.nip.io/api
+Bridge URL: https://keptn.xxx.nip.io/bridge
+Bridge Username:
+Bridge Password:
+API Token : xxx
+Git Server: https://git.xxx.nip.io
+Git User: keptn
+Git Password: xxx
+The Keptn CLI has already been installed and authenticated. To use keptn here some sample commands
+$ keptn status
+$ keptn create project myfirstproject --shipyard=./shipyard.yaml
+
+If you want to install the Keptn CLI somewhere else - here the description:
+- Install the keptn CLI: curl -sL https://get.keptn.sh | sudo -E bash
+- Authenticate: keptn auth --api-token "xxx" --endpoint "https://keptn.xxx.nip.io/api"
+
+If you want to uninstall Keptn and k3s simply type: k3s-uninstall.sh!
+After that also remove the demo files that were downloaded in your local working directory!
+
+Now go and enjoy Keptn!
+```
+
+Great - let's move on!
+
+## Explore the first Keptn quality gate evaluation
+Duration: 2:00
+
+At the end of the installation output, you will find the URL for accessing the Keptn Bridge.
+
+```
+#######################################>
+# Keptn Deployment Summary
+#######################################>
+API URL : https://keptn.xxxx.nip.io/api
+Bridge URL: https://keptn.xxx.nip.io/bridge
+Bridge Username:
+Bridge Password:
+...
+```
+
+Open the URL and you will find a project already created for you.
+
+
+
+We can navigate via the "Sequence" item on the left to take a look at the first task sequence that Keptn executed and which includes our quality gate evaluation.
+
+
+
+By clicking on the "bar chart" icon 📊 in the evaluation tile, it will bring you to the detail screen of the quality gate.
+
+
+
+We can see that our first quality gate evaluation scores 100% and all our SLOs are met!
+
+Positive
+: If you want to [learn more about SLIs and SLOs](https://keptn.sh/docs/concepts/quality_gates/) please have a look at our documentation.
+
+The quality gate itself is defined in terms of an `SLO.yaml` file that is stored in the Keptn managed Git repository.
+Here is what it looks like:
+
+```
+---
+spec_version: '0.1.0'
+comparison:
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ aggregate_function: avg
+objectives:
+ - sli: http_response_time_seconds_main_page_sum
+ pass:
+ - criteria:
+ - "<=1"
+ warning:
+ - criteria:
+ - "<=0.5"
+ - sli: request_throughput
+ pass:
+ - criteria:
+ - ">=-80%"
+ - sli: go_routines
+ pass:
+ - criteria:
+ - "<=100"
+ - sli: response_time_p95
+total_score:
+ pass: "90%"
+ warning: "75%"
+```
+
+As you can see, it comprises a list of objectives along with criteria that have to be satisfied. In the tutorial, the integration of Prometheus and Keptn is already installed, and therefore Keptn knows how to retrieve the data for each of the specified SLIs in this document.
+The mapping is defined in a so called `SLI.yaml`. You will also find this file in the Keptn managed Git repository. Have a look in the `quality-gate` branch of the repo, that you can access via:
+
+```
+echo https://$(k3s kubectl get ingress gitea-ingress -n git -ojsonpath='{.spec.rules[0].host}')/keptn/prometheus-qg/src/branch/quality-gate
+```
+
+
+## Deploy a new version
+Duration: 2:00
+
+We've prepared a second version of the Podtatohead application for you to evaluate with the Keptn Quality Gates.
+Therefore, let us deploy this alternative version.
+
+Execute the following commands that will deploy the new version.
+
+```
+k3s kubectl set image deploy/helloservice server=ghcr.io/podtato-head/podtatoserver:v0.1.2 --record -n prometheus-qg-quality-gate
+```
+
+If you want to take a look, you can retrieve the URL of your application via this command.
+
+```
+echo https://$(k3s kubectl get ingress podtato-ingress -n prometheus-qg-quality-gate -ojsonpath='{.spec.rules[0].host}')
+```
+
+
+
+Great - let's move on!
+
+## Validate with Keptn Quality Gate
+Duration: 2:00
+
+Now, before we are going to evaluate this version, we need to hit it with some load, otherwise the evaluation would not have enough data for it to query.
+Therefore, we are going to use the `hey` command line tool for load generation. This tool has been installed during the installation procedure of the tutorial and we can make use of it now.
+
+```
+./hey -z 90s -c 10 http://$(k3s kubectl get ingress podtato-ingress -n prometheus-qg-quality-gate -ojsonpath='{.spec.rules[0].host}')
+```
+
+This command will send requests to the publicly available endpoint of our application for 90 seconds.
+
+Once it is finished we can trigger an evaluation of the Keptn Quality Gate via the Keptn CLI.
+
+```
+keptn trigger evaluation --project=prometheus-qg --stage=quality-gate --service=helloservice --timeframe=3m
+```
+
+
+Now it is time to have a look at the quality gate evaluation. Switch back to your Keptn's Bridge and you'll see that a new evaluation event is coming in.
+
+
+
+This time, the evaluation score is way lower than the 100% we scored earlier. We can have a look at the detailed evaluation by clicking on the 📊 icon in the evaluation tile.
+
+We can see that this version is way slower than the previous one. Neither the `request_throughput` nor the `http_response_time_seconds_main_page_sum` could match our SLOs and thus the score is not sufficient to give it a "pass" from Keptn.
+
+
+
+Now you can experiment with defining your own thresholds for the SLOs!
+Access the Git repo via the following command. The credentials to it have been given at the end of your Keptn installation: by default your user is `keptn` and the password is `keptn#R0cks`.
+
+```
+echo https://$(k3s kubectl get ingress gitea-ingress -n git -ojsonpath='{.spec.rules[0].host}')/keptn/prometheus-qg/src/branch/quality-gate
+```
+
+
+
+You can also trigger new load tests via `hey` - you can find all commands earlier in this tutorial.
+After each load execution make sure to trigger the Keptn quality gates via `keptn trigger evaluation`. You can also adjust the timeframe for the evaluation, in case you are running tests that are taking longer, you also want to consider this in the timeframe of the evaluations.
+
+**Now have fun with evaluating the application with SLO-based quality gates!**
+
+
+## Troubleshooting
+
+We've made sure this tutorial is thoroughly tested on the infrastructure listed in the beginning of this tutorial. However, if you run into any issues, here are some tips for you.
+
+### Time-outs
+
+If you run into any timeout issues, that might be due to the following reasons:
+
+- Rate limiting of Dockerhub: the tutorial pulls images from Dockerhub which has a rate limit. Only 100 pulls within a time period of 6 hours are allowed. If you install and uninstall this tutorial multiple times or if you are sharing your IP with others, you might run into this limit. Potential solutions are to wait a bit and try again or to try from another machine with a different IP.
+
+- Certificate not issued: Sometimes the issuing of the Let's Encrypt certificate takes longer than the 5 minutes the installation expects. If so, you can uninstall the tutorial with `k3s-uninstall.sh` and give it a fresh installation. Usually the second or third attempts are way quicker and this should work.
+
+### Get help
+
+Please don't hesitate to reach out in our Slack channel to get help!
+Join us [in the Keptn slack](https://slack.keptn.sh) and ask your questions in the #help channel. We are happy to help!
+
+
+{{ snippets/010/community/feedback.md }}
diff --git a/site/tutorials/keptn-quality-gates-prometheus-011/codelab.json b/site/tutorials/keptn-quality-gates-prometheus-011/codelab.json
new file mode 100644
index 00000000..b59dbea2
--- /dev/null
+++ b/site/tutorials/keptn-quality-gates-prometheus-011/codelab.json
@@ -0,0 +1,28 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2021-11-19T10:46:54+01:00",
+ "id": "keptn-quality-gates-prometheus-011",
+ "duration": 17,
+ "title": "Keptn Quality Gates with Prometheus",
+ "authors": "JΓΌrgen Etzlstorfer",
+ "summary": "5 minute installation. All running on K3s with a single line installation. No Kubernetes cluster needed.",
+ "source": "keptn-quality-gates-prometheus-010_gen.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "prometheus",
+ "quality-gates",
+ "k3s"
+ ],
+ "tags": [
+ "introduction",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-quality-gates-prometheus-011"
+}
diff --git a/site/tutorials/keptn-quickstart-011.md b/site/tutorials/keptn-quickstart-011.md
new file mode 100644
index 00000000..29c241e0
--- /dev/null
+++ b/site/tutorials/keptn-quickstart-011.md
@@ -0,0 +1,63 @@
+summary: Quickstart: Keptn and Prometheus
+id: keptn-quickstart-011
+categories: prometheus,quickstart,k3s
+tags: prometheus,keptn011x,quickstart
+status: Published
+authors: Jürgen Etzlstorfer, Oleg Nenashev
+Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials
+
+# Quickstart: Keptn and Prometheus
+
+## Welcome
+Duration: 0:30
+
+Learn how to get Keptn running in thirty minutes with Prometheus. We'll run Keptn on a local k3d cluster. This quickstart is designed for Linux-based systems. Consequently, use Linux, MacOS, or Windows subsystem for Linux (WSL2).
+
+## Prerequisites
+
+You will need several tools to install Keptn and Prometheus locally: git, k3d, docker, kubectl and helm. You will need to use Linux, MacOS, or Windows subsystem for Linux (WSL2). Note that Docker for Mac may require special licensing if you want to follow this tutorial.
+
+See the installation and configuration guide [here](https://keptn.sh/docs/quickstart/#prerequisites).
+
+## Install Keptn
+
+First of all, you will need to create a cluster for Keptn, and then install and configure Keptn itself. We provide scripts that make it a very quick task as long as you have all tools from _Prerequisites_ ready.
+
+Follow [these guidelines](https://keptn.sh/docs/quickstart/#install-keptn) to install Keptn.
+
+## Try out Multi-Stage Delivery
+
+Keptn allows you to define multi-stage delivery workflows by just declaring what needs to be done. **How to** achieve this delivery workflow is then left to other components, and here as well Keptn provides deployment services, which allow you to set up a multi-stage delivery workflow without a single line of pipeline code.
+
+The definition is manifested in a so-called **shipyard file** that defines a task sequence for delivery. It can hold multiple stages, each with a dedicated deployment strategy, test strategy, as well as a remediation strategy. Keptn takes the shipyard file and creates a multi-stage workflow, with each stage having a deployment strategy (e.g., blue/green), testing strategy (e.g., functional tests or performance tests), and an optional automated remediation strategy for triggering self-healing actions.
+
+If you are interested, try out _Multi-stage delivery_ as documented [here](https://keptn.sh/docs/quickstart/#try-multi-stage-delivery).
+
+## Try out Auto-Remediation
+
+In modern microservices environments, you have to deal with systems that can expose unpredictable behavior due to the high number of interdependencies. For example, changing the configuration of one component might have an impact on a different part of the system. Besides, problems evolve and are often dynamic. The nature and impact of a problem can also change drastically over time. Keptn addresses this challenge by introducing the concept of micro-operations that declare remediation actions for resolving certain problem types or triggering any operational tasks.
+
+If you are interested, try it out as documented [here](https://keptn.sh/docs/quickstart/#try-auto-remediation).
+
+## Explore Keptn
+
+You have a running Keptn instance, so you can browse through the web interface and try out more features while you're at it. Also have a look at our tutorials and documentation to learn how you can use Keptn.
+
+You can find some references and suggestions [here](https://keptn.sh/docs/quickstart/#explore-keptn).
+
+## Stop Keptn
+
+If you are finished exploring Keptn, you can always stop and start the cluster and delete it eventually.
+
+```bash
+k3d cluster stop mykeptn
+k3d cluster start mykeptn
+```
+
+Or delete it if you don't need it anymore
+
+```bash
+k3d cluster delete mykeptn
+```
+
+{{ snippets/11/community/feedback.md }}
diff --git a/site/tutorials/keptn-quickstart-011/codelab.json b/site/tutorials/keptn-quickstart-011/codelab.json
new file mode 100644
index 00000000..edbc8ab0
--- /dev/null
+++ b/site/tutorials/keptn-quickstart-011/codelab.json
@@ -0,0 +1,28 @@
+{
+ "environment": "web",
+ "format": "html",
+ "prefix": "https://storage.googleapis.com",
+ "mainga": "UA-133584243-1",
+ "updated": "2020-11-12T12:04:45+01:00",
+ "id": "keptn-quickstart-011",
+ "duration": 30,
+ "title": "Quickstart: Keptn and Prometheus",
+ "authors": "JΓΌrgen Etzlstorfer, Oleg Nenashev",
+ "summary": "Shows how to get started with Keptn and Prometheus in 30 minutes",
+ "source": "keptn-quickstart-011.md",
+ "theme": "",
+ "status": [
+ "published"
+ ],
+ "category": [
+ "prometheus",
+ "quickstart",
+ "k3s"
+ ],
+ "tags": [
+ "introduction",
+ "keptn011x"
+ ],
+ "feedback": "https://github.com/keptn/tutorials/tree/master/site/tutorials",
+ "url": "keptn-quickstart-011"
+}
diff --git a/site/tutorials/snippets/11/community/feedback.md b/site/tutorials/snippets/11/community/feedback.md
new file mode 100644
index 00000000..dc798e7f
--- /dev/null
+++ b/site/tutorials/snippets/11/community/feedback.md
@@ -0,0 +1,10 @@
+
+## Feedback
+
+Positive
+: We are happy to hear your feedback!
+
+Please visit us in our [Keptn Slack](https://slack.keptn.sh) and tell us how you like Keptn and this tutorial! We are happy to hear your thoughts & suggestions!
+
+Also, make sure to [follow us on Twitter](https://twitter.com/keptnProject) to get the latest news on Keptn, our tutorials and newest releases!
+
diff --git a/site/tutorials/snippets/11/install/authCLI-clusterIP.md b/site/tutorials/snippets/11/install/authCLI-clusterIP.md
new file mode 100644
index 00000000..f20fc97e
--- /dev/null
+++ b/site/tutorials/snippets/11/install/authCLI-clusterIP.md
@@ -0,0 +1,33 @@
+
+## Authenticate Keptn CLI
+Duration: 2:00
+
+Expose the Keptn endpoint via the following command to be able to access it on localhost. Please note that the port-forward must be kept open for the next commands to succeed, therefore we recommend doing this in a separate terminal.
+
+```
+kubectl -n keptn port-forward service/api-gateway-nginx 8080:80
+```
+
+Set the following variables to make it easy to connect to Keptn.
+
+```
+KEPTN_ENDPOINT=http://localhost:8080/api
+KEPTN_API_TOKEN=$(kubectl get secret keptn-api-token -n keptn -ojsonpath='{.data.keptn-api-token}' | base64 --decode)
+```
+
+To authenticate the CLI against the Keptn cluster, use the keptn auth command:
+
+```
+keptn auth --endpoint=$KEPTN_ENDPOINT --api-token=$KEPTN_API_TOKEN
+```
+
+```
+Starting to authenticate
+Successfully authenticated
+```
+
+Positive
+: Congratulations! Your CLI is now successfully authenticated to your Keptn installation.
+
+Positive
+: Please note that the Keptn endpoint can also be publicly exposed. All details can be found in the [Keptn docs](https://keptn.sh/docs/0.10.x/operate/install/).
diff --git a/site/tutorials/snippets/11/install/authCLI-istio.md b/site/tutorials/snippets/11/install/authCLI-istio.md
new file mode 100644
index 00000000..241a435c
--- /dev/null
+++ b/site/tutorials/snippets/11/install/authCLI-istio.md
@@ -0,0 +1,37 @@
+## Connect your Keptn CLI to the Keptn installation
+
+In this section we are referring to the Linux/MacOS derivatives of the commands. If you are using a Windows host, please [follow the official instructions](https://keptn.sh/docs/0.10.x/operate/install/#authenticate-keptn-cli).
+
+First let's extract the information used to access the Keptn installation and store this for later use.
+
+
+```
+KEPTN_ENDPOINT=http://$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')/api
+KEPTN_API_TOKEN=$(kubectl get secret keptn-api-token -n keptn -ojsonpath='{.data.keptn-api-token}' | base64 --decode)
+KEPTN_BRIDGE_URL=http://$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')/bridge
+```
+
+Use this stored information and authenticate the CLI.
+
+
+```
+keptn auth --endpoint=$KEPTN_ENDPOINT --api-token=$KEPTN_API_TOKEN
+```
+
+That will give you:
+```
+Starting to authenticate
+Successfully authenticated
+```
+
+Positive
+: Congratulations - Keptn is successfully installed and your CLI is connected to your Keptn installation!
+
+If you want, you can go ahead and take a look at the Keptn API by navigating to the endpoint that is given via:
+
+
+```
+echo $KEPTN_ENDPOINT
+```
+
+
diff --git a/site/tutorials/snippets/11/install/cluster-aks.md b/site/tutorials/snippets/11/install/cluster-aks.md
new file mode 100644
index 00000000..5cd15f96
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster-aks.md
@@ -0,0 +1,22 @@
+## Prerequisites for installation
+Duration: 5:00
+
+Please download and install the following tools if you do not have them installed on your machine already.
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+
+## Setup Kubernetes cluster
+Duration: 10:00
+
+We are going to setup a Kubernetes cluster in Azure. Therefore, please download the `az` command line tool. Next, please create a cluster in the [Azure Portal](https://portal.azure.com/).
+
+1. Install local tools
+ - [az](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) and make sure to be logged in to your Azure account (with `az login`)
+
+1. Create AKS cluster
+ - Master version >= `1.15.x` (tested version: `1.16.10`)
+ - Size of the cluster: One **D8s_v3** node. Please note that you might go for a smaller or larger size cluster depending on the concrete use case. The suggested sizing is based on the recommendation for the [full tour](../../?cat=full-tour) tutorials.
+
+Find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
diff --git a/site/tutorials/snippets/11/install/cluster-eks.md b/site/tutorials/snippets/11/install/cluster-eks.md
new file mode 100644
index 00000000..fb36c667
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster-eks.md
@@ -0,0 +1,28 @@
+
+## Prerequisites for installation
+Duration: 5:00
+
+Please download and install the following tools if you do not have them installed on your machine already.
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+
+## Setup Kubernetes cluster
+Duration: 10:00
+
+We are going to setup a Kubernetes cluster in EKS.
+
+1. Install local tools
+ - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) (version >= 1.16.156)
+
+1. Create EKS cluster on AWS. You can do so either via the online portal of AWS or via `eksctl`.
+ - version 1.17 (tested version: 1.17)
+ - One `m5.2xlarge` node. Please note that you might go for a smaller or larger size cluster depending on the concrete use case. The suggested sizing is based on the recommendation for the [full tour](../../?cat=full-tour) tutorials.
+ - Sample script using [eksctl](https://eksctl.io/introduction/#installation) to create such a cluster
+
+ ```
+ eksctl create cluster --version=1.17 --name=keptn-cluster --node-type=m5.2xlarge --nodes=1 --region=eu-west-3
+ ```
+
+Find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
diff --git a/site/tutorials/snippets/11/install/cluster-gke.md b/site/tutorials/snippets/11/install/cluster-gke.md
new file mode 100644
index 00000000..00f82358
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster-gke.md
@@ -0,0 +1,39 @@
+
+## Prerequisites for installation
+Duration: 5:00
+
+Please download and install the following tools if you do not have them installed on your machine already.
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+- Alternative: [Google Cloud Shell](https://cloud.google.com/shell) (or alike)
+
+## Setup Kubernetes cluster
+Duration: 10:00
+
+We are going to setup a Kubernetes cluster in the Google Cloud Platform.
+
+1. Install local tools
+ - [gcloud](https://cloud.google.com/sdk/gcloud/)
+
+2. Create GKE cluster
+ - Master version >= `1.15.x` (tested version: `1.15.9-gke.22`)
+ - One **n1-standard-8** node. Please note that you might go for a smaller or larger size cluster depending on the concrete use case. The suggested sizing is based on the recommendation for the [full tour](../../?cat=full-tour) tutorials.
+ - Image type `ubuntu` or `cos` (**Note:** If you plan to use Dynatrace monitoring, select `ubuntu` for a more [convenient setup](https://keptn.sh/docs/0.10.x/monitoring/dynatrace/install/#notes).)
+ - Sample script to create such cluster:
+
+ ```
+ # set environment variables
+ PROJECT=nameofgcloudproject
+ CLUSTER_NAME=nameofcluster
+ ZONE=us-central1-a
+ REGION=us-central1
+ GKE_VERSION="1.15"
+ ```
+
+ ```
+ gcloud container clusters create $CLUSTER_NAME --project $PROJECT --zone $ZONE --no-enable-basic-auth --cluster-version $GKE_VERSION --machine-type "n1-standard-8" --image-type "UBUNTU" --disk-type "pd-standard" --disk-size "100" --metadata disable-legacy-endpoints=true --scopes "https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" --num-nodes "1" --enable-stackdriver-kubernetes --no-enable-ip-alias --network "projects/$PROJECT/global/networks/default" --subnetwork "projects/$PROJECT/regions/$REGION/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --no-enable-autoupgrade
+ ```
+
+Find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
diff --git a/site/tutorials/snippets/11/install/cluster-k3s.md b/site/tutorials/snippets/11/install/cluster-k3s.md
new file mode 100644
index 00000000..ea512fea
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster-k3s.md
@@ -0,0 +1,33 @@
+
+## Prerequisites for installation
+Duration: 5:00
+
+Please download and install the following tools if you do not have them installed on your machine already.
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+
+## Setup Kubernetes cluster with K3s
+Duration: 10:00
+
+We are going to setup a Kubernetes cluster with [K3s](https://k3s.io). Please note that K3s is natively available for Linux, therefore the following commands are for Linux hosts.
+
+1. Download, install K3s (tested with versions 1.16 to 1.18) and run K3s using the following command
+ ```
+ curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.18.3+k3s1 K3S_KUBECONFIG_MODE="644" sh -s - --no-deploy=traefik
+ ```
+ This installs version `v1.18.3+k3s1` (please refer to the [K3s GitHub releases page](https://github.com/rancher/k3s/releases) for newer releases), sets file permissions `644` on `/etc/rancher/k3s/k3s.yaml` and disables `traefik` as an ingress controller.
+
+1. Export the Kubernetes profile using
+ ```
+ export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
+ ```
+
+1. Verify that the connection to the cluster works:
+ ```
+ kubectl get nodes
+ ```
+
+
+Find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
diff --git a/site/tutorials/snippets/11/install/cluster-minikube.md b/site/tutorials/snippets/11/install/cluster-minikube.md
new file mode 100644
index 00000000..bf8c170c
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster-minikube.md
@@ -0,0 +1,33 @@
+
+## Prerequisites for installation
+Duration: 5:00
+
+Please download and install the following tools if you do not have them installed on your machine already.
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+
+## Setup Kubernetes cluster
+Duration: 10:00
+
+We are going to setup a Kubernetes cluster.
+
+Negative
+: Please note that at the moment only specific Minikube versions are supported.
+
+1. Download and install [Minikube](https://github.com/kubernetes/minikube/releases) (tested with versions 1.3 to 1.10).
+
+
+1. Create a new Minikube profile (named keptn) with at least 6 CPU cores and 12 GB memory using:
+ ```
+ minikube start -p keptn --cpus 6 --memory 12200
+ ```
+
+1. Start the Minikube LoadBalancer service in a second terminal by executing:
+
+ ```
+ minikube tunnel
+ ```
+
+Find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
diff --git a/site/tutorials/snippets/11/install/cluster-openshift.md b/site/tutorials/snippets/11/install/cluster-openshift.md
new file mode 100644
index 00000000..6912b50b
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster-openshift.md
@@ -0,0 +1,72 @@
+
+## Prerequisites for installation
+Duration: 5:00
+
+Please download and install the following tools if you do not have them installed on your machine already.
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
+
+## Configure OpenShift cluster
+Duration: 10:00
+
+Negative
+: Please note that you have to bring your own OpenShift cluster in version 3.11
+
+
+1. Install local tools if not already present on your machine.
+
+ - [oc CLI - v3.11](https://github.com/openshift/origin/releases/tag/v3.11.0)
+
+1. Make sure you are connected with your `oc` CLI to your OpenShift cluster.
+
+1. On the OpenShift master node, execute the following steps:
+
+- Set up the required permissions for your user:
+
+ ```
+ oc adm policy --as system:admin add-cluster-role-to-user cluster-admin
+ ```
+
+- Set up the required permissions for the installer pod:
+
+ ```
+ oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:default:default
+ oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:kube-system:default
+ ```
+
+- Enable admission WebHooks on your OpenShift master node:
+
+ ```
+ sudo -i
+ cp -n /etc/origin/master/master-config.yaml /etc/origin/master/master-config.yaml.backup
+ oc ex config patch /etc/origin/master/master-config.yaml --type=merge -p '{
+ "admissionConfig": {
+ "pluginConfig": {
+ "ValidatingAdmissionWebhook": {
+ "configuration": {
+ "apiVersion": "apiserver.config.k8s.io/v1alpha1",
+ "kind": "WebhookAdmission",
+ "kubeConfigFile": "/dev/null"
+ }
+ },
+ "MutatingAdmissionWebhook": {
+ "configuration": {
+ "apiVersion": "apiserver.config.k8s.io/v1alpha1",
+ "kind": "WebhookAdmission",
+ "kubeConfigFile": "/dev/null"
+ }
+ }
+ }
+ }
+ }' >/etc/origin/master/master-config.yaml.patched
+ if [ $? == 0 ]; then
+ mv -f /etc/origin/master/master-config.yaml.patched /etc/origin/master/master-config.yaml
+ /usr/local/bin/master-restart api && /usr/local/bin/master-restart controllers
+ else
+ exit
+ fi
+ ```
+
+Find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/install/cluster.md b/site/tutorials/snippets/11/install/cluster.md
new file mode 100644
index 00000000..28e9daf5
--- /dev/null
+++ b/site/tutorials/snippets/11/install/cluster.md
@@ -0,0 +1,18 @@
+## Bring your own Kubernetes cluster
+
+Keptn can be installed on a variety of Kubernetes distributions. Please find a full compatibility matrix for supported Kubernetes versions [here](https://keptn.sh/docs/0.10.x/operate/k8s_support/).
+
+Positive
+: For the sizing of the Kubernetes cluster we recommend a cluster with at least 8vCPUs and 30 GB of memory.
+Detailed sizing recommendations for different platforms can be found in the respective [setup tutorials](../../?cat=installation).
+
+Please find tutorials [how to set up your cluster here](../../?cat=installation). For the best tutorial experience, please follow the sizing recommendations given in the tutorials.
+
+Positive
+: Please note that if you are following one of the installation tutorials, only steps 1-3 are needed (setup of cluster) since we are going to install Keptn as part of this tutorial.
+
+Please make sure your environment matches these prerequisites:
+
+- kubectl
+- Linux or MacOS (preferred as some instructions are targeted for these platforms)
+- On Windows: [Git Bash 4 Windows](https://gitforwindows.org/), [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
diff --git a/site/tutorials/snippets/11/install/configureIstio.md b/site/tutorials/snippets/11/install/configureIstio.md
new file mode 100644
index 00000000..cc452972
--- /dev/null
+++ b/site/tutorials/snippets/11/install/configureIstio.md
@@ -0,0 +1,73 @@
+## Configure Istio
+
+We are using Istio for traffic routing and as an ingress to our cluster. To make the setup experience as smooth as possible we have provided some scripts for your convenience. If you want to run the Istio configuration yourself step by step, please [take a look at the Keptn documentation](https://keptn.sh/docs/0.10.x/operate/install/#option-3-expose-keptn-via-an-ingress).
+
+The first step for our configuration automation for Istio is downloading the configuration bash script from Github:
+
+
+```
+curl -o configure-istio.sh https://raw.githubusercontent.com/keptn/examples/0.10.0/istio-configuration/configure-istio.sh
+```
+
+After that you need to make the file executable using the `chmod` command.
+
+
+```
+chmod +x configure-istio.sh
+```
+
+Finally, let's run the configuration script to automatically create your Ingress resources.
+
+
+```
+./configure-istio.sh
+```
+
+### What is actually created
+
+Positive
+: There is no need to copy the following resources, they are for information purposes only.
+
+With this script, you have created an Ingress based on the following manifest.
+
+```
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ annotations:
+ kubernetes.io/ingress.class: istio
+ name: api-keptn-ingress
+ namespace: keptn
+spec:
+ rules:
+ - host: .nip.io
+ http:
+ paths:
+ - backend:
+ serviceName: api-gateway-nginx
+ servicePort: 80
+```
+
+In addition, the script has created a gateway resource for you so that the onboarded services are also available publicly.
+
+```
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+ name: public-gateway
+ namespace: istio-system
+spec:
+ selector:
+ istio: ingressgateway
+ servers:
+ - port:
+ name: http
+ number: 80
+ protocol: HTTP
+ hosts:
+ - '*'
+```
+
+Finally, the script restarts the `helm-service` pod of Keptn to fetch this new configuration.
diff --git a/site/tutorials/snippets/11/install/download-keptnCLI.md b/site/tutorials/snippets/11/install/download-keptnCLI.md
new file mode 100644
index 00000000..05eaff80
--- /dev/null
+++ b/site/tutorials/snippets/11/install/download-keptnCLI.md
@@ -0,0 +1,48 @@
+
+
+## Download Keptn CLI
+Duration: 3:00
+
+Every release of Keptn provides binaries for the Keptn CLI. These binaries are available for Linux, macOS, and Windows.
+
+There are multiple options how to get the Keptn CLI on your machine.
+
+- Easiest option (works on Linux, Mac OS, Windows with Bash and WSL2):
+
+
+ ```
+ curl -sL https://get.keptn.sh | KEPTN_VERSION=0.10.0 bash
+ ```
+ This will download and install the Keptn CLI in the specified version automatically.
+
+- Using Homebrew (on macOS):
+
+ ```
+ brew install keptn
+ ```
+
+- Another option is to manually download the current release of the Keptn CLI:
+ 1. Download the version for your operating system and architecture from [Download CLI](https://github.com/keptn/keptn/releases/tag/0.10.0)
+ 2. Unpack the download
+ 3. Find the `keptn` binary (e.g., `keptn-0.10.0-amd64.exe`) in the unpacked directory and rename it to `keptn`
+ - *Linux / macOS*: Add executable permissions (``chmod +x keptn``), and move it to the desired destination (e.g. `mv keptn /usr/local/bin/keptn`)
+
+ - *Windows*: Copy the executable to the desired folder and add the executable to your PATH environment variable.
+
+
+Now, you should be able to run the Keptn CLI:
+- Linux / macOS
+
+
+ ```
+ keptn --help
+ ```
+
+- Windows
+ ```
+ .\keptn.exe --help
+ ```
+
+Positive
+: For the rest of the documentation we will stick to the *Linux / macOS* version of the commands.
+
diff --git a/site/tutorials/snippets/11/install/install-full.md b/site/tutorials/snippets/11/install/install-full.md
new file mode 100644
index 00000000..361f623c
--- /dev/null
+++ b/site/tutorials/snippets/11/install/install-full.md
@@ -0,0 +1,56 @@
+
+## Install Keptn in your cluster
+Duration: 5:00
+
+To install the latest release of Keptn with full _quality gate + continuous delivery capabilities_ in your Kubernetes cluster, execute the `keptn install` command.
+
+
+
+```
+keptn install --endpoint-service-type=ClusterIP --use-case=continuous-delivery
+```
+
+
+
+Positive
+: The installation process will take about 3-5 minutes.
+
+Positive
+: Please note that Keptn comes with different installation options, all of them described in detail in the [Keptn docs](https://keptn.sh/docs/0.10.x/operate/install/).
+
+### Installation details
+
+By default Keptn installs into the `keptn` namespace. Once the installation is complete we can verify the deployments:
+
+
+```
+kubectl get deployments -n keptn
+```
+
+Here is the output of the command:
+
+```
+NAME READY UP-TO-DATE AVAILABLE AGE
+api-gateway-nginx 1/1 1 1 2m44s
+api-service 1/1 1 1 2m44s
+approval-service 1/1 1 1 2m44s
+bridge 1/1 1 1 2m44s
+configuration-service 1/1 1 1 2m44s
+helm-service 1/1 1 1 2m44s
+jmeter-service 1/1 1 1 2m44s
+lighthouse-service 1/1 1 1 2m44s
+litmus-service 1/1 1 1 2m44s
+mongodb 1/1 1 1 2m44s
+mongodb-datastore 1/1 1 1 2m44s
+remediation-service 1/1 1 1 2m44s
+shipyard-controller 1/1 1 1 2m44s
+statistics-service 1/1 1 1 2m44s
+```
+
+
+
+
diff --git a/site/tutorials/snippets/11/install/istio.md b/site/tutorials/snippets/11/install/istio.md
new file mode 100644
index 00000000..87085f19
--- /dev/null
+++ b/site/tutorials/snippets/11/install/istio.md
@@ -0,0 +1,29 @@
+
+## Install Istio
+Duration: 10:00
+
+Download the Istio command line tool by [following the official instructions](https://istio.io/latest/docs/setup/install/) or by executing the following steps.
+
+
+```
+curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.11.2 sh -
+```
+
+Check the version of Istio that has been downloaded and execute the installer from the corresponding folder, e.g.:
+
+
+
+
+```
+./istio-1.11.2/bin/istioctl install
+```
+
+The installation of Istio should be finished within a couple of minutes.
+
+```
+This will install the Istio default profile with ["Istio core" "Istiod" "Ingress gateways"] components into the cluster. Proceed? (y/N) y
+✔ Istio core installed
+✔ Istiod installed
+✔ Ingress gateways installed
+✔ Installation complete
+```
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/install/open-bridge.md b/site/tutorials/snippets/11/install/open-bridge.md
new file mode 100644
index 00000000..ac5fe9bc
--- /dev/null
+++ b/site/tutorials/snippets/11/install/open-bridge.md
@@ -0,0 +1,27 @@
+
+## Open Keptn's Bridge & API
+Duration: 1:00
+
+Now that you have installed Keptn you can take a look at its user interface aka the Keptn's Bridge.
+
+### Keptn's Bridge
+
+Open a browser and navigate to [http://localhost:8080](http://localhost:8080) to take a look. The bridge will be empty at this point but when using Keptn it will be populated with events.
+
+If asked for credentials, you can get them by executing the following commands.
+
+```
+echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+```
+
+
+
+Positive
+: We are frequently providing new versions of the Keptn's Bridge with new functionality - [learn more here](https://keptn.sh/docs/0.10.x/reference/bridge/)!
+
+### Keptn API
+
+Besides the Keptn's Bridge, please consider also taking a look at the Keptn API endpoint if you are interested in interacting with Keptn via the API. Keptn comes with a fully documented swagger-API that can be found under the `/api` endpoint.
+
+
diff --git a/site/tutorials/snippets/11/install/postinstall-tutorials.md b/site/tutorials/snippets/11/install/postinstall-tutorials.md
new file mode 100644
index 00000000..d9618108
--- /dev/null
+++ b/site/tutorials/snippets/11/install/postinstall-tutorials.md
@@ -0,0 +1,11 @@
+
+## Proceed with exploring Keptn
+Duration: 1:00
+
+Now that you have successfully installed Keptn, you can explore other tutorials!
+
+Here are some possibilities:
+
+- Take a full tour on Keptn with either [Prometheus](../../?cat=prometheus) or [Dynatrace](../../?cat=dynatrace)
+- Explore [Keptn Quality Gates](../../?cat=quality-gates)
+- Explore [Automated Operations with Keptn](../../?cat=automated-operations)
diff --git a/site/tutorials/snippets/11/integrations/gettingStarted.md b/site/tutorials/snippets/11/integrations/gettingStarted.md
new file mode 100644
index 00000000..15961dc5
--- /dev/null
+++ b/site/tutorials/snippets/11/integrations/gettingStarted.md
@@ -0,0 +1,13 @@
+
+## Getting started with Keptn integrations
+Duration: 3:00
+
+Keptn can be easily extended with external tools such as notification tools, other [SLI providers](https://keptn.sh/docs/0.10.x/quality_gates/sli/), bots to interact with Keptn, etc.
+While we do not cover additional integrations in this tutorial, please feel free to take a look at our integration repositories:
+
+- [Keptn Contrib](https://github.com/keptn-contrib) lists mature Keptn integrations that you can use for your Keptn installation
+- [Keptn Sandbox](https://github.com/keptn-sandbox) collects mostly new integrations and those that are currently under development - however, you can also find useful integrations here.
+
+Positive
+: We are happy to receive your contributions - please [follow this guide](https://github.com/keptn-sandbox/contributing) if you want to contribute your own services to Keptn
+
diff --git a/site/tutorials/snippets/11/manage/createProject-crc.md b/site/tutorials/snippets/11/manage/createProject-crc.md
new file mode 100644
index 00000000..54a696e8
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/createProject-crc.md
@@ -0,0 +1,175 @@
+
+## Create your first project
+Duration: 5:00
+
+A project in Keptn is the logical unit that can hold multiple (micro)services. Therefore, it is the starting point for each Keptn installation.
+
+To get all files you need for this tutorial, please clone the example repo to your local machine.
+
+
+
+```
+git clone --branch 0.10.0 https://github.com/keptn/examples.git --single-branch
+
+cd examples/onboarding-carts
+```
+
+
+Create a new project for your services using the `keptn create project` command. In this example, the project is called *sockshop*. Before executing the following command, make sure you are in the `examples/onboarding-carts` folder.
+
+**Recommended:** Create a new project with Git upstream:
+
+To configure a Git upstream for this tutorial, the Git user (`--git-user`), an access token (`--git-token`), and the remote URL (`--git-remote-url`) are required. If a requirement is not met, go to [the Keptn documentation](https://keptn.sh/docs/0.10.x/manage/git_upstream/) where instructions for GitHub, GitLab, and Bitbucket are provided.
+
+Let's define the variables before running the command:
+
+
+
+```
+GIT_USER=gitusername
+GIT_TOKEN=gittoken
+GIT_REMOTE_URL=remoteurl
+```
+
+Now let's create the project using the `keptn create project` command.
+
+```
+keptn create project sockshop --shipyard=./shipyard.yaml --git-user=$GIT_USER --git-token=$GIT_TOKEN --git-remote-url=$GIT_REMOTE_URL
+```
+
+
+**Alternatively:** If you don't want to use a Git upstream, you can create a new project without it but please note that this is not the recommended way:
+
+
+```
+keptn create project sockshop --shipyard=./shipyard.yaml
+```
+
+
+For creating the project, the tutorial relies on a `shipyard.yaml` file as shown below:
+
+```
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-sockshop"
+spec:
+ stages:
+ - name: "dev"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "test"
+ properties:
+ teststrategy: "functional"
+ - name: "evaluation"
+ - name: "release"
+ - name: "delivery-direct"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "staging"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "dev.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+ - name: "release"
+ - name: "rollback"
+ triggeredOn:
+ - event: "staging.delivery.finished"
+ selector:
+ match:
+ result: "fail"
+ tasks:
+ - name: "rollback"
+ - name: "delivery-direct"
+ triggeredOn:
+ - event: "dev.delivery-direct.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "production"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "staging.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "release"
+ - name: "rollback"
+ triggeredOn:
+ - event: "production.delivery.finished"
+ selector:
+ match:
+ result: "fail"
+ tasks:
+ - name: "rollback"
+ - name: "delivery-direct"
+ triggeredOn:
+ - event: "staging.delivery-direct.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "remediation"
+ triggeredOn:
+ - event: "production.remediation.finished"
+ selector:
+ match:
+ evaluation.result: "fail"
+ tasks:
+ - name: "get-action"
+ - name: "action"
+ - name: "evaluation"
+ triggeredAfter: "15m"
+ properties:
+ timeframe: "15m"
+
+```
+
+This shipyard contains three stages: dev, staging, and production. This results in the three Kubernetes namespaces: sockshop-dev, sockshop-staging, and sockshop-production.
+
+* **dev** will have a direct (big bang) deployment strategy and functional tests are executed
+* **staging** will have a blue/green deployment strategy with automated approvals for passing quality gates as well as quality gates which result in warnings. As configured, performance tests are executed.
+* **production** will have a blue/green deployment strategy without any further testing. Approvals are done automatically for passed quality gates but manual approval is needed for quality gate evaluations that result in a warning. The configured remediation strategy is used for self-healing in production.
+
+
+Positive
+: To learn more about a *shipyard* file, please take a look at the [Shipyard specification](https://github.com/keptn/spec/blob/master/shipyard.md).
+
+Let's take a look at the project that we have just created. We can find all this information in the Keptn's Bridge.
+Therefore, we need the credentials that have been automatically generated for us.
+
+
+```
+echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+```
+
+Now use these credentials to access it on your [Keptn's Bridge](http://api-gateway-nginx-keptn.apps-crc.testing/bridge).
+
+
+You will find the just created project in the bridge with all stages.
+
+
diff --git a/site/tutorials/snippets/11/manage/createProject.md b/site/tutorials/snippets/11/manage/createProject.md
new file mode 100644
index 00000000..29445e46
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/createProject.md
@@ -0,0 +1,179 @@
+
+## Create your first project
+Duration: 5:00
+
+A project in Keptn is the logical unit that can hold multiple (micro)services. Therefore, it is the starting point for each Keptn installation.
+
+To get all files you need for this tutorial, please clone the example repo to your local machine.
+
+
+
+```
+git clone --branch 0.10.0 https://github.com/keptn/examples.git --single-branch
+
+cd examples/onboarding-carts
+```
+
+
+Create a new project for your services using the `keptn create project` command. In this example, the project is called *sockshop*. Before executing the following command, make sure you are in the `examples/onboarding-carts` folder.
+
+**Recommended:** Create a new project with Git upstream:
+
+To configure a Git upstream for this tutorial, the Git user (`--git-user`), an access token (`--git-token`), and the remote URL (`--git-remote-url`) are required. If a requirement is not met, go to [the Keptn documentation](https://keptn.sh/docs/0.10.x/manage/git_upstream/) where instructions for GitHub, GitLab, and Bitbucket are provided.
+
+Let's define the variables before running the command:
+
+```
+GIT_USER=gitusername
+GIT_TOKEN=gittoken
+GIT_REMOTE_URL=remoteurl
+```
+
+Now let's create the project using the `keptn create project` command.
+
+```
+keptn create project sockshop --shipyard=./shipyard.yaml --git-user=$GIT_USER --git-token=$GIT_TOKEN --git-remote-url=$GIT_REMOTE_URL
+```
+
+Negative
+: Please note that the Git repo **must not** be initialized - it has to be an empty repository without any branch or commit. [See the docs for more details](https://keptn.sh/docs/0.10.x/manage/git_upstream/).
+
+**Alternatively:** If you don't want to use a Git upstream, you can create a new project without it but please note that this is not the recommended way:
+
+
+```
+keptn create project sockshop --shipyard=./shipyard.yaml
+```
+
+
+For creating the project, the tutorial relies on a `shipyard.yaml` file as shown below:
+
+```
+apiVersion: "spec.keptn.sh/0.2.0"
+kind: "Shipyard"
+metadata:
+ name: "shipyard-sockshop"
+spec:
+ stages:
+ - name: "dev"
+ sequences:
+ - name: "delivery"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "test"
+ properties:
+ teststrategy: "functional"
+ - name: "evaluation"
+ - name: "release"
+ - name: "delivery-direct"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "staging"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "dev.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "test"
+ properties:
+ teststrategy: "performance"
+ - name: "evaluation"
+ - name: "release"
+ - name: "rollback"
+ triggeredOn:
+ - event: "staging.delivery.finished"
+ selector:
+ match:
+ result: "fail"
+ tasks:
+ - name: "rollback"
+ - name: "delivery-direct"
+ triggeredOn:
+ - event: "dev.delivery-direct.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "production"
+ sequences:
+ - name: "delivery"
+ triggeredOn:
+ - event: "staging.delivery.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "blue_green_service"
+ - name: "release"
+ - name: "rollback"
+ triggeredOn:
+ - event: "production.delivery.finished"
+ selector:
+ match:
+ result: "fail"
+ tasks:
+ - name: "rollback"
+ - name: "delivery-direct"
+ triggeredOn:
+ - event: "staging.delivery-direct.finished"
+ tasks:
+ - name: "deployment"
+ properties:
+ deploymentstrategy: "direct"
+ - name: "release"
+
+ - name: "remediation"
+ triggeredOn:
+ - event: "production.remediation.finished"
+ selector:
+ match:
+ evaluation.result: "fail"
+ tasks:
+ - name: "get-action"
+ - name: "action"
+ - name: "evaluation"
+ triggeredAfter: "15m"
+ properties:
+ timeframe: "15m"
+
+```
+
+
+This shipyard contains three stages: dev, staging, and production. Later in the tutorial, deployments will be made in three corresponding Kubernetes namespaces: `sockshop-dev`, `sockshop-staging`, and `sockshop-production`.
+
+* **dev** will have a direct (big bang) deployment strategy and functional tests are executed
+* **staging** will have a blue/green deployment strategy with automated approvals for passing quality gates as well as quality gates which result in warnings. As configured, performance tests are executed.
+* **production** will have a blue/green deployment strategy without any further testing. Approvals are done automatically for passed quality gates but manual approval is needed for quality gate evaluations that result in a warning. The configured remediation strategy is used for self-healing in production.
+
+
+Positive
+: To learn more about a *shipyard* file, please take a look at the [Shipyard specification](https://github.com/keptn/spec/blob/master/shipyard.md).
+
+Let's take a look at the project that we have just created in the Keptn's Bridge. To access it, visit the URL contained in `$KEPTN_BRIDGE_URL` using the command:
+
+
+```
+echo $KEPTN_BRIDGE_URL
+```
+
+You can view the Keptn Bridge credentials using the following commands:
+
+
+```
+echo Username: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_USERNAME}" | base64 --decode)
+echo Password: $(kubectl get secret -n keptn bridge-credentials -o jsonpath="{.data.BASIC_AUTH_PASSWORD}" | base64 --decode)
+```
+
+You will find the just created project in the bridge with all stages.
+
+
diff --git a/site/tutorials/snippets/11/manage/onboardService-crc.md b/site/tutorials/snippets/11/manage/onboardService-crc.md
new file mode 100644
index 00000000..2635d711
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/onboardService-crc.md
@@ -0,0 +1,153 @@
+
+## Create first microservice
+Duration: 5:00
+
+After creating the project, services can be created for our project.
+
+1. Create the **carts** service using the [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/) and [keptn add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) commands:
+
+
+ ```
+ keptn create service carts --project=sockshop
+ keptn add-resource --project=sockshop --service=carts --all-stages --resource=./carts.tgz --resourceUri=helm/carts.tgz
+ ```
+
+1. After creating the service, tests (i.e., functional- and performance tests) need to be added as basis for quality gates in the different stages:
+
+ * Functional tests for *dev* stage:
+
+
+ ```
+ keptn add-resource --project=sockshop --stage=dev --service=carts --resource=jmeter/basiccheck.jmx --resourceUri=jmeter/basiccheck.jmx
+ ```
+
+ * Performance tests for *staging* stage:
+
+
+ ```
+ keptn add-resource --project=sockshop --stage=staging --service=carts --resource=jmeter/load.jmx --resourceUri=jmeter/load.jmx
+ ```
+
+ **Note:** You can adapt the tests in `basiccheck.jmx` as well as `load.jmx` for your service. However, you must not rename the files because there is a hardcoded dependency on these file names in the current implementation of Keptn's jmeter-service.
+
+Since the carts service requires a mongodb database, a second service needs to be created.
+
+* Create the **carts-db** service using the [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/) and [keptn add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) commands.
+
+
+ ```
+ keptn create service carts-db --project=sockshop
+ keptn add-resource --project=sockshop --service=carts-db --all-stages --resource=./carts-db.tgz --resourceUri=helm/carts-db.tgz
+ ```
+
+Take a look in your Keptn's Bridge and see the newly onboarded services.
+
+
+
+## Deploy first build with Keptn
+Duration: 5:00
+
+After onboarding the services, a built artifact of each service can be deployed.
+
+1. Deploy the carts-db service by executing the [keptn trigger delivery](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_trigger_delivery/) command:
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts-db --image=docker.io/mongo --tag=4.2.2 --sequence=delivery-direct
+ ```
+
+
+
+1. Deploy the carts service by specifying the built artifact, which is stored on DockerHub and tagged with version 0.12.1:
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts --image=docker.io/keptnexamples/carts --tag=0.12.1
+ ```
+
+
+
+1. Go to Keptn's Bridge and check which events have already been generated.
+ 
+
+
+1. **Optional:** Verify the pods that should have been created for services carts and carts-db:
+
+
+ ```
+ kubectl get pods --all-namespaces | grep carts-
+ ```
+
+ ```
+ sockshop-dev carts-77dfdc664b-25b74 1/1 Running 0 10m
+ sockshop-dev carts-db-54d9b6775-lmhf6 1/1 Running 0 13m
+ sockshop-production carts-db-54d9b6775-4hlwn 2/2 Running 0 12m
+ sockshop-production carts-primary-79bcc7c99f-bwdhg 2/2 Running 0 2m15s
+ sockshop-staging carts-db-54d9b6775-rm8rw 2/2 Running 0 12m
+ sockshop-staging carts-primary-79bcc7c99f-mbbgq 2/2 Running 0 7m24s
+ ```
+
+## View carts service
+Duration: 2:00
+
+1. Get the URL for your carts service with the following commands in the respective namespaces:
+
+ - [http://carts.sockshop-dev.apps-crc.testing](http://carts.sockshop-dev.apps-crc.testing)
+ - [http://carts.sockshop-staging.apps-crc.testing](http://carts.sockshop-staging.apps-crc.testing)
+ - [http://carts.sockshop-production.apps-crc.testing](http://carts.sockshop-production.apps-crc.testing)
+
+1. Navigate to the URLs to inspect the carts service. In the production namespace, you should receive an output similar to this:
+
+ 
+
+
+## Generate traffic
+Duration: 2:00
+
+Now that the service is running in all three stages, let us generate some traffic so we have some data we can base the evaluation on.
+
+Change the directory to `examples/load-generation/cartsloadgen`. If you are still in the onboarding-carts directory, use the following command or change it accordingly:
+
+
+```
+cd ../load-generation/cartsloadgen
+```
+
+Now let us deploy a pod that will generate some traffic for all three stages of our demo environment.
+
+
+```
+kubectl apply -f deploy/cartsloadgen-base.yaml
+```
+
+
+
+The output will look similar to this.
+```
+namespace/loadgen created
+deployment.extensions/cartsloadgen created
+```
+
+Optionally, you can verify that the load generator has been started.
+
+
+
+```
+kubectl get pods -n loadgen
+```
+
+```
+NAME READY STATUS RESTARTS AGE
+cartsloadgen-5dc47c85cf-kqggb 1/1 Running 0 117s
+```
+
+
+
diff --git a/site/tutorials/snippets/11/manage/onboardService.md b/site/tutorials/snippets/11/manage/onboardService.md
new file mode 100644
index 00000000..34a913d4
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/onboardService.md
@@ -0,0 +1,164 @@
+
+## Create first microservice
+Duration: 5:00
+
+After creating the project, services can be created for our project.
+
+1. Create the **carts** service using the [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/) and [keptn add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) commands:
+
+
+ ```
+ keptn create service carts --project=sockshop
+ keptn add-resource --project=sockshop --service=carts --all-stages --resource=./carts.tgz --resourceUri=helm/carts.tgz
+ ```
+
+1. After creating the service, tests (i.e., functional- and performance tests) need to be added as basis for quality gates in the different stages:
+
+ * Functional tests for *dev* stage:
+
+
+ ```
+ keptn add-resource --project=sockshop --stage=dev --service=carts --resource=jmeter/basiccheck.jmx --resourceUri=jmeter/basiccheck.jmx
+ ```
+
+ * Performance tests for *staging* stage:
+
+
+ ```
+ keptn add-resource --project=sockshop --stage=staging --service=carts --resource=jmeter/load.jmx --resourceUri=jmeter/load.jmx
+ ```
+
+ **Note:** You can adapt the tests in `basiccheck.jmx` as well as `load.jmx` for your service. However, you must not rename the files because there is a hardcoded dependency on these file names in the current implementation of Keptn's jmeter-service.
+
+Since the carts service requires a mongodb database, a second service needs to be created.
+
+* Create the **carts-db** service using the [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/) and [keptn add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) commands.
+
+
+ ```
+ keptn create service carts-db --project=sockshop
+ keptn add-resource --project=sockshop --service=carts-db --all-stages --resource=./carts-db.tgz --resourceUri=helm/carts-db.tgz
+ ```
+
+Take a look in your Keptn's Bridge and see the newly created services.
+
+
+
+## Deploy first build with Keptn
+Duration: 5:00
+
+After creating the services, a built artifact of each service can be deployed.
+
+1. Deploy the carts-db service by executing the [keptn trigger delivery](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_trigger_delivery/) command:
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts-db --image=docker.io/mongo --tag=4.2.2 --sequence=delivery-direct
+ ```
+
+
+
+1. Deploy the carts service by specifying the built artifact, which is stored on DockerHub and tagged with version 0.12.1:
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts --image=docker.io/keptnexamples/carts --tag=0.12.1
+ ```
+
+
+
+1. Go to Keptn's Bridge and check which events have already been generated.
+ 
+
+
+1. **Optional:** Verify the pods that should have been created for services carts and carts-db:
+
+
+ ```
+ kubectl get pods --all-namespaces | grep carts-
+ ```
+
+ ```
+ sockshop-dev carts-77dfdc664b-25b74 1/1 Running 0 10m
+ sockshop-dev carts-db-54d9b6775-lmhf6 1/1 Running 0 13m
+ sockshop-production carts-db-54d9b6775-4hlwn 2/2 Running 0 12m
+ sockshop-production carts-primary-79bcc7c99f-bwdhg 2/2 Running 0 2m15s
+ sockshop-staging carts-db-54d9b6775-rm8rw 2/2 Running 0 12m
+ sockshop-staging carts-primary-79bcc7c99f-mbbgq 2/2 Running 0 7m24s
+ ```
+
+## View carts service
+Duration: 2:00
+
+1. Get the URL for your carts service with the following commands in the respective namespaces:
+
+
+ ```
+ echo http://carts.sockshop-dev.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+
+ ```
+ echo http://carts.sockshop-staging.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+
+ ```
+ echo http://carts.sockshop-production.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+1. Navigate to the URLs to inspect the carts service. In the production namespace, you should receive an output similar to this:
+
+ 
+
+
+## Generate traffic
+Duration: 2:00
+
+Now that the service is running in all three stages, let us generate some traffic so we have some data we can base the evaluation on.
+
+Change the directory to `examples/load-generation/cartsloadgen`. If you are still in the onboarding-carts directory, use the following command or change it accordingly:
+
+
+```
+cd ../load-generation/cartsloadgen
+```
+
+Now let us deploy a pod that will generate some traffic for all three stages of our demo environment.
+
+
+```
+kubectl apply -f deploy/cartsloadgen-base.yaml
+```
+
+
+
+The output will look similar to this.
+```
+namespace/loadgen created
+deployment.extensions/cartsloadgen created
+```
+
+Optionally, you can verify that the load generator has been started.
+
+
+
+```
+kubectl get pods -n loadgen
+```
+
+```
+NAME READY STATUS RESTARTS AGE
+cartsloadgen-5dc47c85cf-kqggb 1/1 Running 0 117s
+```
+
+
+
diff --git a/site/tutorials/snippets/11/manage/simplenode/createProject.md b/site/tutorials/snippets/11/manage/simplenode/createProject.md
new file mode 100644
index 00000000..77520e43
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/simplenode/createProject.md
@@ -0,0 +1,39 @@
+## Create a Keptn Project
+Duration: 2:00
+
+A project in Keptn is the logical unit that can hold multiple (micro)services. Therefore, it is the starting point for each Keptn installation.
+
+To get all files you need for this tutorial, please clone the example repo to your local machine.
+```
+git clone --branch 0.10.0 https://github.com/keptn/examples.git --single-branch
+
+cd examples/simplenodeservice
+```
+
+Create a new project for your services using the `keptn create project` command. In this example, the project is called *simplenodeproject*. Before executing the following command, make sure you are in the `examples/simplenodeservice/keptn` folder.
+Please note that [defining a Git upstream](https://keptn.sh/docs/0.10.x/manage/project/#select-git-based-upstream) is recommended, but in case that is not wanted the parameters `git-user`, `git-token` and `git-remote-url` can be omitted.
+
+```
+keptn create project simplenodeproject --shipyard=./shipyard.yaml --git-user=GIT_USER --git-token=GIT_TOKEN --git-remote-url=GIT_REMOTE_URL
+```
+
+For creating the project, the tutorial relies on a `shipyard.yaml` file as shown below:
+
+```
+stages:
+ - name: "staging"
+ deployment_strategy: "direct"
+ test_strategy: "performance"
+ - name: "prod"
+ deployment_strategy: "blue_green_service"
+ test_strategy: "performance"
+```
+
+This shipyard contains two stages: staging and prod. This results in the two Kubernetes namespaces: simplenodeproject-staging, and simplenodeproject-prod.
+
+* **staging** will have a direct (big bang) deployment strategy and performance tests are executed. If tests are good and SLI/SLO based quality gates are passed Keptn will promote it to the *prod* stage
+* **prod** will have a blue/green deployment strategy also using performance tests to validate that deployment and eventually switch between blue/green in case performance testing has revealed a problem
+
+Positive
+: To learn more about a *shipyard* file, please take a look at the [Shipyard specification](https://github.com/keptn/spec/blob/master/shipyard.md).
+
diff --git a/site/tutorials/snippets/11/manage/simplenode/createProjectQualityStageOnly.md b/site/tutorials/snippets/11/manage/simplenode/createProjectQualityStageOnly.md
new file mode 100644
index 00000000..1e852b48
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/simplenode/createProjectQualityStageOnly.md
@@ -0,0 +1,32 @@
+## Create a Keptn Quality Gate Project
+Duration: 2:00
+
+A project in Keptn is the logical unit that can hold multiple (micro)services. Therefore, it is the starting point for each Keptn installation.
+
+To get all files you need for this tutorial, please clone the example repo to your local machine.
+```
+git clone --branch 0.10.0 https://github.com/keptn/examples.git --single-branch
+
+cd examples/simplenodeservicequality-gate-only
+```
+
+For this quality gate focused tutorial we will create a new Keptn project using `keptn create project` to create a project called *qgproject* using the shipyard_qualitystageonly.yaml as a shipyard definition.
+Before executing the following command, make sure you are in the `examples/simplenodeservicequality-gate-only` folder.
+Please note that [defining a Git upstream](https://keptn.sh/docs/0.10.x/manage/project/#select-git-based-upstream) is recommended, but in case that is not wanted the parameters `git-user`, `git-token` and `git-remote-url` can be omitted.
+
+```
+keptn create project qgproject --shipyard=./shipyard_qualitystageonly.yaml --git-user=GIT_USER --git-token=GIT_TOKEN --git-remote-url=GIT_REMOTE_URL
+```
+
+For our purpose we create a simple project with a single stage called **qualitystage** as we only use Keptn for quality gate evaluations instead of using Keptn for multi-stage delivery pipelines. The content of this shipyard file is rather simple:
+
+```
+stages:
+ - name: "qualitystage"
+```
+
+Later - as we onboard services - we will be able to use this qualitystage to let Keptn evaluate our SLI/SLO based quality gates!
+
+Positive
+: To learn more about a *shipyard* file, please take a look at the [Shipyard specification](https://github.com/keptn/spec/blob/master/shipyard.md).
+
diff --git a/site/tutorials/snippets/11/manage/simplenode/createServiceQualityStageOnly.md b/site/tutorials/snippets/11/manage/simplenode/createServiceQualityStageOnly.md
new file mode 100644
index 00000000..1283424b
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/simplenode/createServiceQualityStageOnly.md
@@ -0,0 +1,13 @@
+
+## Add our microservice to our project
+Duration: 2:00
+
+After creating a Keptn project, services can be added. If we want to use Keptn for Progressive Delivery where Keptn deploys our service using Helm, we can use `keptn create service` and `keptn add-resource`. In the case where Keptn doesn't do the deployment but is used for Quality Gate or Performance Evaluation, we use the command [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/):
+
+```
+keptn create service evalservice --project=qgproject
+```
+
+This command just created a service we call *evalservice* and added it to our *qgproject*.
+
+We can validate that this service was added by opening up Keptn's bridge and navigating to the *qgproject*.
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/manage/simplenode/onboardService.md b/site/tutorials/snippets/11/manage/simplenode/onboardService.md
new file mode 100644
index 00000000..fffb44a2
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/simplenode/onboardService.md
@@ -0,0 +1,40 @@
+
+## Create a microservice for our project
+Duration: 2:00
+
+After creating the project, services can be created for our project.
+
+1. Create the **simplenode** service using the [keptn create service](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_create_service/) and [keptn add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) commands:
+
+```
+keptn create service simplenode --project=simplenodeproject
+keptn add-resource --project=simplenodeproject --service=simplenode --all-stages --resource=./simplenode.tgz --resourceUri=helm/simplenode.tgz
+```
+
+We have passed a helm charts directory to create a service. Keptn will use this Helm Chart for its delivery. It will also automatically create the respective deployments for our blue/green and direct deployment strategies in staging and prod. There is nothing we have to worry about.
+
+
+## Deploy first build with Keptn
+Duration: 2:00
+
+After creating our service we can immediately start using Keptn to deploy an artifact.
+
+1. Let's deploy version 1 of our simplenode service by executing the [keptn trigger delivery](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_trigger_delivery/) command:
+
+```
+keptn trigger delivery --project=simplenodeproject --service=simplenode --image=grabnerandi/simplenodeservice --tag=1.0.0
+```
+
+Keptn will now start deploying version 1.0.0 into staging. During the first deployment some special initial steps are performed, e.g: namespaces get created for each stage.
+But - as we haven't yet uploaded tests and not specified SLI/SLOs for the Quality Gates, Keptn will skip these checks and promote the artifact rather quickly into production. Overall, that process should not take longer than 2-3 minutes.
+
+1. **Optional:** Verify the pods that should have been created for the simplenode service:
+
+```
+kubectl get pods --all-namespaces | grep simplenode
+```
+
+```
+simplenodeproject-prod simplenode-54d9b6775-4hlwn 1/1 Running 0 12m
+simplenodeproject-staging simplenode-54d9b6775-rm8rw 1/1 Running 0 12m
+```
diff --git a/site/tutorials/snippets/11/manage/simplenode/validateFirstServiceDeployment.md b/site/tutorials/snippets/11/manage/simplenode/validateFirstServiceDeployment.md
new file mode 100644
index 00000000..2102fd1b
--- /dev/null
+++ b/site/tutorials/snippets/11/manage/simplenode/validateFirstServiceDeployment.md
@@ -0,0 +1,18 @@
+
+## Validate deployment of first version
+Duration: 2:00
+
+After every deployment we can start in the Keptn's bridge to validate the progress. We can answer questions like
+* Did the deployment already happen?
+* What is the URL of the deployed service in each stage?
+* Did anything bad happen?
+
+1. Go to Keptn's Bridge and see how Keptn has deployed the service into staging and then production:
+
+The bridge also gives you access to the links of the deployed service.
+
+
+If you click on them you should see a new browser window pop open showing you version 1 in staging and version 1 in production:
+
+
+
diff --git a/site/tutorials/snippets/11/monitoring/configureDynatraceSlis.md b/site/tutorials/snippets/11/monitoring/configureDynatraceSlis.md
new file mode 100644
index 00000000..25e4c999
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/configureDynatraceSlis.md
@@ -0,0 +1,34 @@
+## Setup SLI provider
+Duration: 2:00
+
+During the evaluation of a quality gate, the dynatrace-service can be used to fetch the values for the SLIs that are referenced in an SLO configuration. In our example, we are going to customize the SLIs made available to Keptn by adding an SLI configuration file.
+
+Prior to executing the next step, please make sure you are in the correct folder `examples/onboarding-carts`. If not, please change the directory accordingly, e.g., with `cd ../../onboarding-carts/`.
+
+Next, add a global SLI configuration file to the project for all services and stages we create.
+
+
+
+
+```
+keptn add-resource --project=sockshop --resource=sli-config-dynatrace.yaml --resourceUri=dynatrace/sli.yaml
+```
+
+For your information, this is what the file looks like:
+```
+---
+spec_version: '1.0'
+indicators:
+ throughput: "metricSelector=builtin:service.requestCount.total:merge(\"dt.entity.service\"):sum&entitySelector=type(SERVICE),tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT)"
+ error_rate: "metricSelector=builtin:service.errors.total.count:merge(\"dt.entity.service\"):avg&entitySelector=type(SERVICE),tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT)"
+ response_time_p50: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(50)&entitySelector=type(SERVICE),tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT)"
+ response_time_p90: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(90)&entitySelector=type(SERVICE),tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT)"
+ response_time_p95: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(95)&entitySelector=type(SERVICE),tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT)"
+```
+
+Configure the already onboarded project with the new SLI provider for Keptn to create some needed resources (e.g., a configmap):
+
+
+```
+keptn configure monitoring dynatrace --project=sockshop
+```
diff --git a/site/tutorials/snippets/11/monitoring/setupDynatrace-crc.md b/site/tutorials/snippets/11/monitoring/setupDynatrace-crc.md
new file mode 100644
index 00000000..5256f29f
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/setupDynatrace-crc.md
@@ -0,0 +1,166 @@
+
+## Setup Dynatrace
+Duration: 7:00
+
+For enabling the Keptn Quality Gates and for production monitoring, we are going to use Dynatrace as the data provider. Therefore, we are going to setup Dynatrace in our Kubernetes cluster to have our sample application monitored and we can use the monitoring data for both the basis for evaluating quality gates as well as a trigger to start self-healing.
+
+Positive
+: You have to bring your own Dynatrace tenant
+
+If you don't have a Dynatrace tenant yet, sign up for a [free trial](https://www.dynatrace.com/trial?utm_campaign=keptn) or a [developer account](https://www.dynatrace.com/developer/).
+
+## Gather Dynatrace tokens
+Duration: 6:00
+
+1. Create a Dynatrace API Token
+
+ Log in to your Dynatrace tenant and go to **Settings > Integration > Dynatrace API**. Then, create a new API token with the following permissions:
+
+ - Access problem and event feed, metrics, and topology
+ - Read log content
+ - Read configuration
+ - Write configuration
+ - Capture request data
+ - Read metrics
+ - Ingest metrics
+ - Read entities
+
+ Take a look at this screenshot to double check the right token permissions for you.
+
+ 
+ 
+
+1. Create a Dynatrace PaaS Token
+
+ In your Dynatrace tenant, go to **Settings > Integration > Platform as a Service**, and create a new PaaS Token.
+
+1. Store your credentials in a Kubernetes secret by executing the following command. The `DT_TENANT` has to be set according to the appropriate pattern:
+ - Dynatrace SaaS tenant (this format is most likely for you): `{your-environment-id}.live.dynatrace.com`
+ - Dynatrace-managed tenant: `{your-domain}/e/{your-environment-id}`
+
+ If running on a Unix/Linux based system, you can use variables for ease of use. Naturally, it is also fine to just replace the values in the `kubectl` command itself.
+
+
+
+
+
+ ```
+ export DT_TENANT=yourtenant.live.dynatrace.com
+ export DT_API_TOKEN=yourAPItoken
+ export DT_PAAS_TOKEN=yourPAAStoken
+ ```
+
+ Negative
+ : Please make sure your DT_TENANT does _not contain_ any trailing slashes nor a https:// in the beginning.
+
+ If you used the variables, the next command can be copied and pasted without modifications. If you have not set the variables, please make sure to set the right values in the next command.
+
+
+ ```
+ oc -n keptn create secret generic dynatrace --from-literal="DT_API_TOKEN=$DT_API_TOKEN" \
+ --from-literal="DT_TENANT=$DT_TENANT" \
+ --from-literal="KEPTN_API_URL=http://api-gateway-nginx-keptn.apps-crc.testing/api" \
+ --from-literal="KEPTN_API_TOKEN=$KEPTN_API_TOKEN" -o yaml --dry-run=client | oc apply -f -
+ ```
+
+## Deploy Dynatrace OneAgent Operator
+
+To make the tutorial experience as smooth as possible, we are providing an automation script to setup the Dynatrace OneAgent operator in your Kubernetes cluster. For details on the installation, we refer to the [official Dynatrace documentation](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/deploy-oneagent-k8/). You can download and run the script using the following instructions.
+
+1. Downloading the automation file.
+
+
+ ```
+ curl -o deploy-dynatrace-oneagent.sh https://raw.githubusercontent.com/keptn/examples/release-0.9.0/dynatrace-oneagent/deploy-dynatrace-oneagent-openshift.sh
+ ```
+
+1. Making the file executable using the `chmod` command.
+
+
+ ```
+ chmod +x deploy-dynatrace-oneagent.sh
+ ```
+
+1. Executing the script to automatically deploy the Dynatrace OneAgent Operator.
+
+
+ ```
+ ./deploy-dynatrace-oneagent.sh
+ ```
+
+1. Optional: Verify if all pods in the Dynatrace namespace are running. It might take up to 1-2 minutes for all pods to be up and running.
+
+
+ ```
+ kubectl get pods -n dynatrace
+ ```
+
+ ```
+ dynatrace-oneagent-operator-696fd89b76-n9d9n 1/1 Running 0 6m26s
+ dynatrace-oneagent-webhook-78b6d99c85-h9759 2/2 Running 0 6m25s
+ oneagent-g9m42 1/1 Running 0 69s
+ ```
+
+
+## Install Dynatrace integration
+Duration: 5:00
+
+1. The Dynatrace integration into Keptn is handled by the *dynatrace-service*. To install the *dynatrace-service*, execute:
+
+
+ ```
+ kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/dynatrace-service/release-0.14.0/deploy/service.yaml
+ ```
+
+1. When the service is deployed, use the following command to install Dynatrace on your cluster. If Dynatrace is already deployed, the current deployment of Dynatrace will not be modified.
+
+
+ ```
+ keptn configure monitoring dynatrace --suppress-websocket
+ ```
+
+ Output should be similar to this:
+ ```
+ ID of Keptn context: 79f19c36-b718-4bb6-88d5-cb79f163289b
+ Configuring Dynatrace monitoring
+ Dynatrace OneAgent Operator is installed on cluster
+ Setting up auto-tagging rules in Dynatrace Tenant
+ Tagging rule keptn_service already exists
+ Tagging rule keptn_stage already exists
+ Tagging rule keptn_project already exists
+ Tagging rule keptn_deployment already exists
+ Setting up problem notifications in Dynatrace Tenant
+ Checking Keptn alerting profile availability
+ Keptn alerting profile available
+ Dynatrace Monitoring setup done
+ ```
+
+**Verify Dynatrace configuration**
+
+Since Keptn has configured your Dynatrace tenant, let us take a look at what has been done for you:
+
+
+- *Tagging rules:* When you navigate to **Settings > Tags > Automatically applied tags** in your Dynatrace tenant, you will find following tagging rules:
+ - keptn_deployment
+ - keptn_project
+ - keptn_service
+ - keptn_stage
+
+ This means that Dynatrace will automatically apply tags to your onboarded services.
+
+- *Problem notification:* A problem notification has been set up to inform Keptn of any problems with your services to allow auto-remediation. You can check the problem notification by navigating to **Settings > Integration > Problem notifications** and you will find a **keptn remediation** problem notification.
+
+- *Alerting profile:* An alerting profile with all problems set to *0 minutes* (immediate) is created. You can review this profile by navigating to **Settings > Alerting > Alerting profiles**.
+
+- *Dashboard and Management zone:* When creating a new Keptn project or executing the [keptn configure monitoring](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_configure_monitoring/) command for a particular project (see Note 1), a dashboard and management zone will be generated reflecting the environment as specified in the shipyard file.
+
+### Verify installation
+
+At the end of your installation, please verify that all Dynatrace resources are in a Ready and Running status by executing `kubectl get pods -n dynatrace`:
+
+```
+NAME READY STATUS RESTARTS AGE
+dynatrace-oneagent-operator-7f477bf78d-dgwb6 1/1 Running 0 8m21s
+oneagent-b22m4 1/1 Running 0 8m21s
+oneagent-k7jn6 1/1 Running 0 8m21s
+```
diff --git a/site/tutorials/snippets/11/monitoring/setupDynatrace.md b/site/tutorials/snippets/11/monitoring/setupDynatrace.md
new file mode 100644
index 00000000..314d6ae1
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/setupDynatrace.md
@@ -0,0 +1,149 @@
+
+## Setup Dynatrace
+Duration: 7:00
+
+For enabling the Keptn Quality Gates and for production monitoring, we are going to use Dynatrace as the data provider. Therefore, we are going to set up Dynatrace in our Kubernetes cluster so that our sample application is monitored and we can use the monitoring data both as the basis for evaluating quality gates and as a trigger to start self-healing.
+
+Positive
+: You have to bring your own Dynatrace tenant
+
+If you don't have a Dynatrace tenant yet, sign up for a [free trial](https://www.dynatrace.com/trial?utm_campaign=keptn) or a [developer account](https://www.dynatrace.com/developer/).
+
+## Deploy Dynatrace OneAgent Operator
+
+To monitor a Kubernetes environment using Dynatrace, please setup dynatrace-operator as described below, or visit the [official Dynatrace documentation](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/deploy-oneagent-k8/).
+
+For setting up dynatrace-operator, perform the following steps:
+
+1. Log into your Dynatrace environment
+1. Open Dynatrace Hub (on the left hand side, scroll down to **Manage** and click on **Hub**)
+1. Within Dynatrace Hub, search for Kubernetes
+ 
+1. Click on Kubernetes, and select **Monitor Kubernetes** at the bottom of the screen
+1. In the following screen, select the Platform and click on **Create tokens** to generate PaaS and API tokens.
+1. Select options appropriate for your cluster:
+ - By default, most Kubernetes clusters will only offer a self-signed certificate. In such cases, please select *Skip SSL Security Check* when deploying the Dynatrace OneAgent Operator.
+ - When deploying the Dynatrace OneAgent Operator to a cluster running on a *Container-Optimized OS (cos)*, which includes GKE, Anthos, CaaS and PKS environments, please select the *Enable volume storage* option.
+ 
+1. Copy the generated code and run it in a terminal/bash
+1. Optional: Verify if all pods in the `dynatrace` namespace are running. It might take up to 1-2 minutes for all pods to be up and running.
+
+
+ ```
+ kubectl get pods -n dynatrace
+ ```
+
+ ```
+ NAME READY STATUS RESTARTS AGE
+ dynakube-kubemon-0 1/1 Running 0 11h
+ dynatrace-oneagent-operator-cc9856cfd-hrv4x 1/1 Running 0 2d11h
+ dynatrace-oneagent-webhook-5d67c9bb76-pz2gh 2/2 Running 0 2d11h
+ dynatrace-operator-fb56f7f59-pf5sg 1/1 Running 0 2d11h
+ oneagent-gc2lc 1/1 Running 0 35h
+ oneagent-w7msm 1/1 Running 0 35h
+ ```
+
+ Note: If you are on newer versions of OneAgent / Dynatrace Operator, pods might look as follows:
+ ```
+ NAME READY STATUS RESTARTS AGE
+ dynakube-classic-d2ckw 1/1 Running 0 1d13h
+ dynakube-kubemon-0 1/1 Running 0 15h
+ dynakube-routing-0 1/1 Running 0 23h
+ dynatrace-operator-fb56f7f59-pf5sg 1/1 Running 0 1d13h
+ ```
+
+ **Note**: In case any pods are crashing with `CrashLoopBackOff` or `Error`, please double check that you ticked *Enable volume storage*. Alternatively, please take a look at [the official OneAgent troubleshooting guide](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/maintenance/troubleshoot-deployment-and-connectivity/#anchor_deploy).
+
+1. Optional: Verify in your Dynatrace Environment under the section *Kubernetes* that your cluster is monitored.
+
+## Create Dynatrace API token
+Duration: 6:00
+
+1. Create a Dynatrace API Token
+
+ Log in to your Dynatrace tenant and go to **Settings > Integration > Dynatrace API**. Then, generate a new API token with the following permissions:
+
+ - Access problem and event feed, metrics, and topology
+ - Read log content
+ - Read configuration
+ - Write configuration
+ - Capture request data
+ - Read metrics
+ - Ingest metrics
+ - Read entities
+
+ Take a look at this screenshot to double check the right token permissions for you.
+
+ 
+ 
+
+1. Store your credentials in a Keptn-managed secret by executing the following command. The `DT_TENANT` has to be set according to the appropriate pattern:
+ - Dynatrace SaaS tenant (this format is most likely for you): `{your-environment-id}.live.dynatrace.com`
+ - Dynatrace-managed tenant: `{your-domain}/e/{your-environment-id}`
+
+ If running on a Unix/Linux based system, you can use variables for ease of use. Naturally, it is also fine to just replace the values in the `keptn` command itself.
+
+
+
+
+ ```
+ DT_TENANT=yourtenant.live.dynatrace.com
+ DT_API_TOKEN=yourAPItoken
+ ```
+
+ If you used the variables, the next command can be copied and pasted without modifications. If you have not set the variables, please make sure to set the right values in the next command.
+
+
+ ```
+ keptn create secret dynatrace --scope=dynatrace-service --from-literal="DT_TENANT=$DT_TENANT" --from-literal="DT_API_TOKEN=$DT_API_TOKEN"
+ ```
+
+## Install Dynatrace integration
+Duration: 5:00
+
+1. The *dynatrace-service* integrates Dynatrace into Keptn. The latest version may be installed using the helm chart available in the Releases section of the GitHub project. Please use the same namespace for the dynatrace-service as you are using for Keptn, e.g. `keptn`:
+
+
+ ```
+ helm upgrade --install dynatrace-service -n keptn https://github.com/keptn-contrib/dynatrace-service/releases/download/0.17.1/dynatrace-service-0.17.1.tgz --set dynatraceService.config.keptnApiUrl=$KEPTN_ENDPOINT --set dynatraceService.config.keptnBridgeUrl=$KEPTN_BRIDGE_URL --set dynatraceService.config.generateTaggingRules=true --set dynatraceService.config.generateProblemNotifications=true --set dynatraceService.config.generateManagementZones=true --set dynatraceService.config.generateDashboards=true --set dynatraceService.config.generateMetricEvents=true
+
+ ```
+
+1. Once installed, use the following command to configure Keptn to use Dynatrace for monitoring. This will also set up monitoring in your Dynatrace environment.
+
+
+ ```
+ keptn configure monitoring dynatrace
+ ```
+
+ The output of the command will tell you what has been set up in your Dynatrace environment:
+ ```
+ ID of Keptn context: 79f19c36-b718-4bb6-88d5-cb79f163289b
+ Dynatrace monitoring setup done.
+ The following entities have been configured:
+
+ ...
+ ---Problem Notification:---
+ - Successfully set up Keptn Alerting Profile and Problem Notifications
+ ...
+
+ ```
+
+**Verify Dynatrace configuration**
+
+Since Keptn has configured your Dynatrace tenant, let us take a look at what has been done for you:
+
+
+- *Tagging rules:* When you navigate to **Settings > Tags > Automatically applied tags** in your Dynatrace tenant, you will find the following tagging rules:
+ - keptn_deployment
+ - keptn_project
+ - keptn_service
+ - keptn_stage
+
+ This means that Dynatrace will automatically apply tags to your onboarded services.
+
+- *Problem notification:* A problem notification has been set up to inform Keptn of any problems with your services to allow auto-remediation. You can check the problem notification by navigating to **Settings > Integration > Problem notifications** and you will find a **keptn remediation** problem notification.
+
+- *Alerting profile:* An alerting profile with all problems set to *0 minutes* (immediate) is created. You can review this profile by navigating to **Settings > Alerting > Alerting profiles**.
+
+- *Dashboard and Management zone:* When creating a new Keptn project or executing the [keptn configure monitoring](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_configure_monitoring/) command for a particular project (see Note 1), a dashboard and management zone will be generated reflecting the environment as specified in the shipyard file.
diff --git a/site/tutorials/snippets/11/monitoring/setupPrometheus.md b/site/tutorials/snippets/11/monitoring/setupPrometheus.md
new file mode 100644
index 00000000..12ed0538
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/setupPrometheus.md
@@ -0,0 +1,94 @@
+
+## Setup Prometheus Monitoring
+Duration: 3:00
+
+After creating a project and service, you can set up Prometheus monitoring and configure scrape jobs using the Keptn CLI.
+
+Keptn doesn't install or manage Prometheus and its components. Users need to install Prometheus and Prometheus Alert manager as a prerequisite.
+
+* To install the Prometheus and Alert Manager, execute:
+
+```
+kubectl create ns monitoring
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm install prometheus prometheus-community/prometheus --namespace monitoring
+```
+
+### Execute the following steps to install prometheus-service
+
+* Download the Keptn's Prometheus service manifest
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/release-0.7.1/deploy/service.yaml
+```
+
+* Replace the environment variable value according to the use case and apply the manifest
+
+```
+# Prometheus installed namespace
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" PROMETHEUS_NS="monitoring"
+
+# Setup Prometheus Endpoint
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" PROMETHEUS_ENDPOINT="http://prometheus-server.monitoring.svc.cluster.local:80"
+
+# Alert Manager installed namespace
+kubectl set env deployment/prometheus-service -n keptn --containers="prometheus-service" ALERT_MANAGER_NS="monitoring"
+```
+
+* Install Role and Rolebinding to permit Keptn's prometheus-service for performing operations in the Prometheus installed namespace.
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/release-0.7.1/deploy/role.yaml -n monitoring
+```
+
+
+
+
+* Execute the following command to install Prometheus and set up the rules for the *Prometheus Alerting Manager*:
+
+```
+keptn configure monitoring prometheus --project=sockshop --service=carts
+```
+
+
+
+
+### Optional: Verify Prometheus setup in your cluster
+
+* To verify that the Prometheus scrape jobs are correctly set up, you can access Prometheus by enabling port-forwarding for the prometheus-service:
+
+```
+kubectl port-forward svc/prometheus-server 8080:80 -n monitoring
+```
+
+Prometheus is then available on [localhost:8080/targets](http://localhost:8080/targets) where you can see the targets for the service:
+
+
+We are going to add the configuration for our SLIs in terms of an SLI file that maps the _name_ of an indicator to a PromQL statement how to actually query it. Please make sure you are in the correct folder `examples/onboarding-carts`.
+
+### Prometheus SLI provider
+
+During the evaluation of a quality gate, the Prometheus provider is required that is implemented by an internal Keptn service, the *prometheus-service*. This service will _fetch the values_ for the SLIs that are referenced in an SLO configuration file.
+
+We are going to add the configuration for our SLIs in terms of an SLI file that maps the _name_ of an indicator to a PromQL statement how to actually query it. Please make sure you are in the correct folder `examples/onboarding-carts`.
+
+
+
+
+```
+keptn add-resource --project=sockshop --stage=staging --service=carts --resource=sli-config-prometheus-bg.yaml --resourceUri=prometheus/sli.yaml
+```
+
+For your information, the contents of the file are as follows:
+```
+---
+spec_version: '1.0'
+indicators:
+ response_time_p50: histogram_quantile(0.5, sum by(le) (rate(http_response_time_milliseconds_bucket{handler="ItemsController.addToCart",job="$SERVICE-$PROJECT-$STAGE-canary"}[$DURATION_SECONDS])))
+ response_time_p90: histogram_quantile(0.9, sum by(le) (rate(http_response_time_milliseconds_bucket{handler="ItemsController.addToCart",job="$SERVICE-$PROJECT-$STAGE-canary"}[$DURATION_SECONDS])))
+ response_time_p95: histogram_quantile(0.95, sum by(le) (rate(http_response_time_milliseconds_bucket{handler="ItemsController.addToCart",job="$SERVICE-$PROJECT-$STAGE-canary"}[$DURATION_SECONDS])))
+```
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/monitoring/simplenode/createLoadTestingDashboard.md b/site/tutorials/snippets/11/monitoring/simplenode/createLoadTestingDashboard.md
new file mode 100644
index 00000000..9017177c
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/simplenode/createLoadTestingDashboard.md
@@ -0,0 +1,14 @@
+## Create Load Testing Dashboard
+Duration: 2:00
+
+While it is great that Keptn pulls in all these metrics automatically for us and evaluates them as part of the quality gate - some of us might still want to look at a dashboard - seeing all metrics in real-time while tests are running. Or maybe going back in time and explore the details of a test that ran in the past.
+Dynatrace provides an automation API to create dashboards, allowing us to create a dashboard that shows all key metrics of our application in a single view.
+
+Make sure you navigate to the folder *examples\simplenodeservice\dynatrace*. Now execute this
+```
+$ ./createLoadTestingDashboard.sh
+```
+
+This script will create a new dashboard in Dynatrace called "Keptn Performance as a Self-Service Insights Dashboard". Go to Dynatrace, click on Dashboards and open it up. It should look somewhat like this!
+
+
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/monitoring/simplenode/setupDynatraceSLIProvider.md b/site/tutorials/snippets/11/monitoring/simplenode/setupDynatraceSLIProvider.md
new file mode 100644
index 00000000..bda14092
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/simplenode/setupDynatraceSLIProvider.md
@@ -0,0 +1,17 @@
+## Setup SLI provider
+Duration: 2:00
+
+During the evaluation of a quality gate, the Dynatrace SLI provider is required that is implemented by an internal Keptn service, the dynatrace-sli-service. This service will fetch the values for the SLIs that are referenced in an SLO configuration.
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/dynatrace-sli-service/release-0.10.3/deploy/service.yaml
+```
+
+Configure the already onboarded project with the new SLI provider:
+
+```
+keptn configure monitoring dynatrace --project=simplenodeproject
+```
+
+Positive
+: Since we already installed the Dynatrace service, the SLI provider can fetch the credentials to connect to Dynatrace from the same secret we created earlier.
diff --git a/site/tutorials/snippets/11/monitoring/simplenode/setupDynatraceSLIProviderQualityStageOnly.md b/site/tutorials/snippets/11/monitoring/simplenode/setupDynatraceSLIProviderQualityStageOnly.md
new file mode 100644
index 00000000..b54ce939
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/simplenode/setupDynatraceSLIProviderQualityStageOnly.md
@@ -0,0 +1,17 @@
+## Setup SLI provider
+Duration: 2:00
+
+During the evaluation of a quality gate, the Dynatrace SLI provider is required that is implemented by an internal Keptn service, the dynatrace-sli-service. This service will fetch the values for the SLIs that are referenced in an SLO configuration.
+
+```
+kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/dynatrace-sli-service/release-0.10.3/deploy/service.yaml -n keptn
+```
+
+Configure the already onboarded project with the new SLI provider:
+
+```
+keptn configure monitoring dynatrace --project=qgproject
+```
+
+Positive
+: Since we already installed the Dynatrace service, the SLI provider can fetch the credentials to connect to Dynatrace from the same secret we created earlier.
diff --git a/site/tutorials/snippets/11/monitoring/simplenode/validateMonitoringData.md b/site/tutorials/snippets/11/monitoring/simplenode/validateMonitoringData.md
new file mode 100644
index 00000000..333637d7
--- /dev/null
+++ b/site/tutorials/snippets/11/monitoring/simplenode/validateMonitoringData.md
@@ -0,0 +1,12 @@
+
+## Validate Dynatrace Monitoring Data
+Duration: 2:00
+
+Once Keptn has deployed our application and we have successfully validated that the app is indeed running by accessing the app through its URL we can also validate that Dynatrace is monitoring not only your k8s cluster but also the app we have deployed.
+
+In Dynatrace use the navigation menu on the left and navigate to the Host view. You should find an entry for each of your k8s cluster nodes. Click one of them. You should see host metrics, list of processes & containers, events ...
+Via the `...` button you can access the Smartscape view which gives you full stack visibility of everything that is hosted on that k8s cluster node. You should also see our deployed Node.js services which you can click on and navigate to the detailed view:
+
+
+
+If you navigate to the service view you will notice that the service has 4 tags on it: keptn_project, keptn_stage, keptn_service and keptn_deployment. These tags are extracted from the Helm Chart which is passing this information via DT_CUSTOM_PROP. These tags also later allow Keptn to query data exactly for a specific deployed service, e.g: only data from our service deployed in staging!
diff --git a/site/tutorials/snippets/11/quality-gate-only/simplenode/executeQualityGateThroughAPI.md b/site/tutorials/snippets/11/quality-gate-only/simplenode/executeQualityGateThroughAPI.md
new file mode 100644
index 00000000..723a8e17
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gate-only/simplenode/executeQualityGateThroughAPI.md
@@ -0,0 +1,90 @@
+## Run Quality Gate through Keptn API
+Duration: 2:00
+
+After we have executed the Quality Gate Evaluation through the CLI lets do the same through the [Keptn API](https://keptn.sh/docs/0.10.x/reference/api/).
+If you read through the [Keptn API documentation](https://keptn.sh/docs/0.10.x/reference/api/) you learn we have to get the API Endpoint and the Token first!
+
+1. Get API Endpoint
+
+Keptn status gives us the endpoint:
+
+```
+$ keptn status
+Starting to authenticate
+Successfully authenticated
+Using a file-based storage for the key because the password-store seems to be not set up.
+CLI is authenticated against the Keptn cluster https://YOURKEPTNDOMAIN/api
+```
+
+2. Retrieving API Token
+
+In order to retrieve the API token we need access to kubectl. There is an easy and convenient way described in the [Keptn CLI Authentication documentation](https://keptn.sh/docs/0.6.0/reference/cli/#authentication) to actually retrieve and store both the API Endpoint and API Token in a variable. This helps us later on to automate API calls as well. So - let's do it!
+```
+KEPTN_ENDPOINT=https://YOURKEPTNDOMAIN/api
+KEPTN_API_TOKEN=$(kubectl get secret keptn-api-token -n keptn -ojsonpath={.data.keptn-api-token} | base64 --decode)
+```
+
+3. Explore the Keptn API via Swagger UI
+
+If you want to make yourself familiar with all options of the API you can browse to your api endpoint and explore the Swagger UI by adding /swagger-ui to the API Endpoint, e.g: https://YOURKEPTNDOMAIN/api/swagger-ui
+
+4. Send a start-evaluation event
+
+The easiest way to make API calls is through the Swagger UI. In order to use it we first need to authorize the Swagger UI by giving it our API token. To get the actual value do:
+```
+echo $KEPTN_API_TOKEN
+```
+
+Copy that value, navigate to your Swagger UI in your browser, make sure you have selected the *api-service* and then click Authorize. Now paste in your token and login!
+
+Now we are ready to send a start-evaluation event just as we did before through the CLI. The only difference is that the API expects a start & end timestamp and doesn't provide the convenient option of a timeframe. That's why you have to make sure to put in timestamps where you know you have data in your Dynatrace environment. Here is my prepared POST body including the JSON object for start-evaluation:
+
+```
+{
+ "type": "sh.keptn.event.start-evaluation",
+ "source": "https://github.com/keptn/keptn",
+ "data": {
+ "start": "2020-05-27T07:00:00.000Z",
+ "end": "2020-05-27T07:05:00.000Z",
+ "project": "qgproject",
+ "stage": "qualitystage",
+ "service": "evalservice",
+ "teststrategy": "manual"
+ }
+}
+```
+
+Please note that the above CloudEvent contains the property `"teststrategy": "manual"`. This is required to tell Keptn that we didn't use Keptn to execute any tests prior to the evaluation but that we just want to do a manual evaluation.
+
+Negative
+: Please remember that the start and end time has to be changed to reflect the time frame you want to evaluate!
+
+In the Swagger UI scroll to the POST /event API call. Click on Try, then post the JSON body into the edit field like shown here:
+
+
+The great thing about Swagger UI is that it will not only execute the request and give you the response. It will also give you the corresponding CURL command so you can easily integrate this API call into your automation scripts. The following shows the output including the response which includes the keptnContext ID of our triggered evaluation:
+
+
+For your reference - here is the CURL command for easy copy/paste in case you want to execute this from your command line:
+```
+curl -X POST "$KEPTN_ENDPOINT/v1/event" -H "accept: application/json" -H "x-token: $KEPTN_API_TOKEN" -H "Content-Type: application/json" -d "{ \"type\": \"sh.keptn.event.start-evaluation\", \"source\": \"https://github.com/keptn/keptn\", \"data\": { \"start\": \"2020-05-27T07:00:00.000Z\", \"end\": \"2020-05-27T07:05:00.000Z\", \"project\": \"qgproject\", \"stage\": \"qualitystage\", \"service\": \"evalservice\", \"teststrategy\": \"manual\" }}"
+```
+
+5. Query for evaluation-done
+
+Remember from when we ran through this exercise with the Keptn CLI? The evaluation may take up to a minute. In order to check whether the evaluation is done and what the result was we can now call the GET /events API endpoint and query the existence of an sh.keptn.events.evaluation-done event for the keptnContext we retrieved earlier:
+
+You can execute this through the Swagger UI or - as it is rather simple and straight forward - using the following CURL:
+
+```
+curl -X GET "$KEPTN_ENDPOINT/v1/event?keptnContext=KEPTN_CONTEXT_ID&type=sh.keptn.events.evaluation-done" -k -H "accept: application/json" -H "x-token: $KEPTN_API_TOKEN"
+```
+
+The result comes in the form of the `evaluation-done` event, which is specified [here](https://github.com/keptn/spec/blob/0.1.3/cloudevents.md#evaluation-done).
+
+Here the screenshot of the response in Swagger UI:
+
+
+6. Evaluate in Bridge & Dynatrace
+
+Just as we did before you can also validate the data in the Keptn's Bridge as well as in Dynatrace!
diff --git a/site/tutorials/snippets/11/quality-gate-only/simplenode/executeQualityGateThroughCLI.md b/site/tutorials/snippets/11/quality-gate-only/simplenode/executeQualityGateThroughCLI.md
new file mode 100644
index 00000000..ebf8aee5
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gate-only/simplenode/executeQualityGateThroughCLI.md
@@ -0,0 +1,44 @@
+## Run Quality Gate through Keptn CLI
+Duration: 2:00
+
+We should now have everything in place to let Keptn evaluate our quality gate. The only thing we need to do is ask Keptn to start an evaluation for our service and give it a timeframe. The easiest way to do this is by using the Keptn CLI using the command [keptn trigger evaluation](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_trigger_evaluation/)
+
+1. Start the evaluation via the CLI
+
+The following is an example to have Keptn evaluate the last 10 minutes by also adding some labels which will later show up in the Keptn's Bridge!
+
+```
+keptn trigger evaluation --project=qgproject --stage=qualitystage --service=evalservice --timeframe=10m --labels=gaterun=1,type=viacli
+```
+
+Please explore all other options in the Keptn CLI Documentation for [keptn trigger evaluation](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_trigger_evaluation/). You can also specify start and end timestamps or also combine start timestamp with a timeframe.
+
+What will come back as an output is the Keptn Context. Something like:
+```
+ID of Keptn context: f628eb68-849c-4e77-ab69-a504af34a081
+```
+
+2. Query status of the evaluation via the CLI
+
+As the evaluation is an asynchronous process it may take a while until the results are available. We can use that Keptn Context with a call [keptn get event evaluation-done](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_get_event_evaluation-done/) to query the status of our request by asking Keptn whether the event evaluation-done is already available for a specific Keptn context.
+
+```
+keptn get event sh.keptn.event.evaluation.finished --keptn-context=f628eb68-849c-4e77-ab69-a504af34a081
+```
+
+While Keptn is still evaluating, you will get a message that querying evaluation-done was not yet successful. Once the evaluation is complete you will however receive the full evaluation result like this:
+```
+{"contenttype":"application/json","data":{"deploymentstrategy":"","evaluation":{"indicatorResults":[{"score":0,"status":"fail","targets":[{"criteria":"\u003c=800","targetValue":800,"violated":true},{"criteria":"\u003c=+10%","targetValue":0,"violated":false},{"criteria":"\u003c600","targetValue":600,"violated":true}],"value":{"metric":"rt_svc_p95","success":true,"value":1153.0662}},{"score":0,"status":"fail","targets":[{"criteria":"\u003e20000","targetValue":20000,"violated":true}],"value":{"metric":"throughput_svc","success":true,"value":189}},{"score":2,"status":"pass","targets":[{"criteria":"\u003c=1%","targetValue":1,"violated":false}],"value":{"metric":"error_rate_svc","success":true,"value":0}},{"score":0,"status":"info","targets":null,"value":{"metric":"rt_svc_p50","success":true,"value":0.404}},{"score":0,"status":"fail","targets":[{"criteria":"\u003c=+10%","targetValue":0.7960919999999999,"violated":true},{"criteria":"\u003c=+10%","targetValue":0.7960919999999999,"violated":true}],"value":{"metric":"rt_svc_p90","success":true,"value":111.49150000000942}}],"result":"fail","score":40,"sloFileContent":"Y29tcGFyaXNvbjoKICBhZ2dyZWdhdGVfZnVuY3Rpb246IGF2ZwogIGNvbXBhcmVfd2l0aDogc2luZ2xlX3Jlc3VsdAogIGluY2x1ZGVfcmVzdWx0X3dpdGhfc2NvcmU6IHBhc3MKICBudW1iZXJfb2ZfY29tcGFyaXNvbl9yZXN1bHRzOiAzCmZpbHRlcjogbnVsbApvYmplY3RpdmVzOgotIGtleV9zbGk6IGZhbHNlCiAgcGFzczoKICAtIGNyaXRlcmlhOgogICAgLSA8PSsxMCUKICAgIC0gPDYwMAogIHNsaTogcnRfc3ZjX3A5NQogIHdhcm5pbmc6CiAgLSBjcml0ZXJpYToKICAgIC0gPD04MDAKICB3ZWlnaHQ6IDEKLSBrZXlfc2xpOiBmYWxzZQogIHBhc3M6CiAgLSBjcml0ZXJpYToKICAgIC0gJz4yMDAwMCcKICBzbGk6IHRocm91Z2hwdXRfc3ZjCiAgd2FybmluZzogbnVsbAogIHdlaWdodDogMQotIGtleV9zbGk6IGZhbHNlCiAgcGFzczoKICAtIGNyaXRlcmlhOgogICAgLSA8PTElCiAgc2xpOiBlcnJvcl9yYXRlX3N2YwogIHdhcm5pbmc6CiAgLSBjcml0ZXJpYToKICAgIC0gPD0yJQogIHdlaWdodDogMgotIGtleV9zbGk6IGZhbHNlCiAgcGFzczogbnVsbAogIHNsaTogcnRfc3ZjX3A1MAogIHdhcm5pbmc6IG51bGwKICB3ZWlnaHQ6IDEKLSBrZXlfc2xpOiBmYWxzZQogIHBhc3M6CiAgLSBjcml0ZXJpYToKICAgIC0gPD0rMTAlCiAgc2xpOiBydF9zdm
NfcDkwCiAgd2FybmluZzoKICAtIGNyaXRlcmlhOgogICAgLSA8PSsxMCUKICB3ZWlnaHQ6IDEKc3BlY192ZXJzaW9uOiAwLjEuMAp0b3RhbF9zY29yZToKICBwYXNzOiA5MCUKICB3YXJuaW5nOiA3NSUK","timeEnd":"2020-05-25T16:17:28.529Z","timeStart":"2020-05-25T16:07:28.529Z"},"labels":{"gaterun":"1","type":"viacli"},"project":"qgproject","result":"fail","service":"evalservice","stage":"qualitystage","teststrategy":"manual"},"id":"fa8f0c4b-4de7-4a0f-870f-175fdbcd6d33","source":"lighthouse-service","specversion":"0.2","time":"2020-05-25T16:19:29.349Z","type":"sh.keptn.events.evaluation-done","shkeptncontext":"f628eb68-849c-4e77-ab69-a504af34a081"}
+```
+
+3. Let's validate the quality gate in the bridge
+
+If everything works as expected we should be able to see the result in the Keptn's bridge
+
+
+
+4. Let's evaluate the event in Dynatrace
+
+Thanks to the Dynatrace integration that also pushes events to Dynatrace we should also see Quality Gate result in Dynatrace when navigating to our service.
+
+
diff --git a/site/tutorials/snippets/11/quality-gate-only/simplenode/setupBasicQualityGate.md b/site/tutorials/snippets/11/quality-gate-only/simplenode/setupBasicQualityGate.md
new file mode 100644
index 00000000..4ad7756b
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gate-only/simplenode/setupBasicQualityGate.md
@@ -0,0 +1,106 @@
+
+## Set up a basic quality gate for evalservice
+Duration: 4:00
+
+Keptn Quality Gates are based on the concepts of
+* SLIs (Service Level Indicators): what metrics (=indicators) are important and how do we query them
+* SLOs (Service Level Objectives): what conditions (=objectives) must be met to consider this a good or a bad value per indicator
+
+In Keptn we therefore need to provide an `sli.yaml` that defines how to query certain metrics from a specific tool, e.g: Dynatrace. We also need to provide an `slo.yaml` that defines the conditions - this file is tool independent.
+To learn more about the *sli.yaml* and *slo.yaml* files, go to [Specifications for Site Reliability Engineering with Keptn](https://github.com/keptn/spec/blob/0.1.3/sre.md).
+
+Our example comes with a basic and an extended set of SLIs and SLOs. In this step we focus on the basic version.
+We have to upload two files using the [add-resource](https://keptn.sh/docs/0.10.x/reference/cli/#keptn-add-resource) command.
+Ensure you navigate to the `examples/simplenodeservice/quality-gate-only` folder.
+
+1. First, lets upload our `dynatrace/sli_basic.yaml` as `dynatrace/sli.yaml`!
+
+```
+keptn add-resource --project=qgproject --stage=qualitystage --service=evalservice --resource=dynatrace/sli_basic.yaml --resourceUri=dynatrace/sli.yaml
+```
+
+This Dynatrace specific SLI contains the definition of 5 indicators. Each indicator has a logical name, e.g: throughput and the tool specific query, e.g: Dynatrace Metrics Query. You can also see that the query definition can leverage placeholders such as $SERVICE (there are more of course). In our case - because we keep it really simple - we only use the $SERVICE placeholder telling Keptn to only query data from those SERVICE entities that have a tag applied with the name of our Keptn Service. This will be *evalservice*:
+```
+---
+spec_version: '1.0'
+indicators:
+ throughput_svc: "metricSelector=builtin:service.requestCount.total:merge(\"dt.entity.service\"):sum&entitySelector=tag($SERVICE),type(SERVICE)"
+ error_rate_svc: "metricSelector=builtin:service.errors.total.rate:merge(\"dt.entity.service\"):avg&entitySelector=tag($SERVICE),type(SERVICE)"
+ rt_svc_p50: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(50)&entitySelector=tag($SERVICE),type(SERVICE)"
+ rt_svc_p90: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(90)&entitySelector=tag($SERVICE),type(SERVICE)"
+ rt_svc_p95: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(95)&entitySelector=tag($SERVICE),type(SERVICE)"
+```
+
+2. Second, lets upload our `slo_basic.yaml` as `slo.yaml`
+
+```
+keptn add-resource --project=qgproject --stage=qualitystage --service=evalservice --resource=slo_basic.yaml --resourceUri=slo.yaml
+```
+
+This `slo.yaml` defines the objectives and references the SLIs defined in the `sli.yaml`:
+
+```
+---
+spec_version: '0.1.0'
+comparison:
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ aggregate_function: avg
+objectives:
+ - sli: response_time_p95
+    pass: # pass if (relative change <= 10% AND absolute value is < 600)
+ - criteria:
+ - "<=+10%" # relative values require a prefixed sign (plus or minus)
+ - "<600" # absolute values only require a logical operator
+ warning: # if the response time is below 800ms, the result should be a warning
+ - criteria:
+ - "<=800"
+ - sli: throughput
+ pass:
+ - criteria:
+ - ">4000"
+ - sli: error_rate
+ weight: 2
+ pass:
+ - criteria:
+ - "<=1%"
+ warning:
+ - criteria:
+ - "<=2%"
+ - sli: response_time_p50
+ - sli: response_time_p90
+ pass:
+ - criteria:
+ - "<=+10%"
+ warning:
+ - criteria:
+ - "<=+10%"
+total_score:
+ pass: "90%"
+ warning: "75%"
+```
+
+## Dynatrace Event Configuration
+Duration: 1:00
+
+Keptn not only provides an integration with Dynatrace to pull SLIs. It also provides an integration where Keptn can push events to Dynatrace, e.g: when a Quality Gate was evaluated it can send that information to a specific set of Dynatrace entities by leveraging the Dynatrace Events API.
+By default the Dynatrace Keptn Service assumes that monitored services in Dynatrace are deployed by Keptn and therefore tagged with 4 specific tags: keptn_project, keptn_service, keptn_stage & keptn_deployment. In a scenario where Keptn doesn't manage the deployment we cannot assume these tags are there. This is why we have to tell the Dynatrace Keptn Service to which entities the events should be sent to. This configuration can be provided by uploading a *dynatrace.conf.yaml* which includes the actual tag rules. In our case it's very simple. First the command to upload the dynatrace.conf.yaml:
+
+```
+keptn add-resource --project=qgproject --stage=qualitystage --service=evalservice --resource=dynatrace/dynatrace.conf.yaml --resourceUri=dynatrace/dynatrace.conf.yaml
+```
+
+And here is the content that is in there - showing you that you can use the Keptn to Dynatrace integration to let Keptn send events to any type of monitored service - not only those that are deployed by Keptn:
+
+```
+---
+spec_version: '0.1.0'
+dtCreds: dynatrace
+attachRules:
+ tagRule:
+ - meTypes:
+ - SERVICE
+ tags:
+ - context: CONTEXTLESS
+ key: $SERVICE
+```
diff --git a/site/tutorials/snippets/11/quality-gate-only/tagEvalservice.md b/site/tutorials/snippets/11/quality-gate-only/tagEvalservice.md
new file mode 100644
index 00000000..8c27a578
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gate-only/tagEvalservice.md
@@ -0,0 +1,11 @@
+## Tag service for quality gate evaluation
+Duration: 2:00
+
+Whether you are using our sample application or your own application, in order for the quality gate evaluation to work we need to properly tag the service. This is important as SLIs (Service Level Indicators) will be queried from Dynatrace only from entities that match a certain tag. In our case we will add a tag that matches the name of the Keptn Service, which is *evalservice*. In Dynatrace there are multiple options to put a tag on a service:
+* [Manual Tagging](https://www.dynatrace.com/support/help/how-to-use-dynatrace/tags-and-metadata/setup/how-to-define-tags/): put them on service and the process group via the UI or the Dynatrace API
+* [Automated Tagging](https://www.dynatrace.com/support/help/how-to-use-dynatrace/tags-and-metadata/setup/how-to-define-tags/): define a rule that tags entities based on existing metadata
+* Cloud & Platform Tags: extract tags & annotations from [k8s](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/other-deployments-and-configurations/leverage-tags-defined-in-kubernetes-deployments/), [OpenShift](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/openshift/other-deployments-and-configurations/leverage-tags-defined-in-openshift-deployments/) or [Cloud Foundry](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/cloud-foundry/other-deployments-and-configurations/leverage-tags-defined-in-cloud-foundry-deployments/)
+* [Environment Tags](https://www.dynatrace.com/support/help/how-to-use-dynatrace/tags-and-metadata/setup/define-tags-based-on-environment-variables/): specify tags via the DT_TAGS environment variable
+
+In our example we simply go the manual tagging route which means we simply add a tag called *evalservice* to the deployed service via the Dynatrace UI like shown below:
+
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/quality-gates/setupQualityGate-crc.md b/site/tutorials/snippets/11/quality-gates/setupQualityGate-crc.md
new file mode 100644
index 00000000..c6a9b528
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gates/setupQualityGate-crc.md
@@ -0,0 +1,165 @@
+
+## Set up the quality gate
+Duration: 4:00
+
+Keptn requires a performance specification for the quality gate. This specification is described in a file called `slo.yaml`, which specifies a Service Level Objective (SLO) that should be met by a service. To learn more about the *slo.yaml* file, go to [Specifications for Site Reliability Engineering with Keptn](https://github.com/keptn/spec/blob/master/service_level_objective.md).
+
+Activate the quality gates for the carts service. Therefore, navigate to the `examples/onboarding-carts` folder and upload the `slo-quality-gates.yaml` file using the [add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) command:
+
+Make sure you are in the correct folder `examples/onboarding-carts`. If not, change the directory accordingly, e.g., `cd ../../onboarding-carts`.
+
+
+
+```
+keptn add-resource --project=sockshop --stage=staging --service=carts --resource=slo-quality-gates.yaml --resourceUri=slo.yaml
+```
+
+This will add the `slo.yaml` file to your Keptn - which is the declarative definition of a quality gate. Let's take a look at the file contents:
+
+```
+---
+spec_version: "1.0"
+comparison:
+ aggregate_function: "avg"
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ number_of_comparison_results: 1
+filter:
+objectives:
+ - sli: "response_time_p95"
+ key_sli: false
+ pass: # pass if (relative change <= 10% AND absolute value is < 600ms)
+ - criteria:
+ - "<=+10%" # relative values require a prefixed sign (plus or minus)
+ - "<600" # absolute values only require a logical operator
+ warning: # if the response time is below 800ms, the result should be a warning
+ - criteria:
+ - "<=800"
+ weight: 1
+total_score:
+ pass: "90%"
+ warning: "75%"
+```
+
+## Verify current version
+Duration: 3:00
+
+You can take a look at the currently deployed version of our "carts" microservice before we deploy the next build of our microservice.
+
+1. Get the URL for your carts service with the following commands in the respective stages:
+
+ - [http://carts.sockshop-dev.apps-crc.testing](http://carts.sockshop-dev.apps-crc.testing)
+ - [http://carts.sockshop-staging.apps-crc.testing](http://carts.sockshop-staging.apps-crc.testing)
+ - [http://carts.sockshop-production.apps-crc.testing](http://carts.sockshop-production.apps-crc.testing)
+
+
+2. Navigate to `http://carts.sockshop-production.YOUR.DOMAIN` for viewing the carts service in your **production** environment and you should receive an output similar to the following:
+
+
+
+
+## Deploy a slow build version
+Duration: 5:00
+
+
+1. Use the Keptn CLI to deploy a version of the carts service, which contains an artificial **slowdown of 1 second** in each request.
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts --image=docker.io/keptnexamples/carts --tag=0.12.2
+ ```
+
+
+
+1. Go ahead and verify that the slow build has reached your `dev` and `staging` environments by opening a browser for both environments. Get the URLs with these commands:
+
+ - [http://carts.sockshop-dev.apps-crc.testing](http://carts.sockshop-dev.apps-crc.testing)
+ - [http://carts.sockshop-staging.apps-crc.testing](http://carts.sockshop-staging.apps-crc.testing)
+
+
+
+
+
+
+
+## Quality gate in action
+Duration: 7:00
+
+After triggering the deployment of the carts service in version v0.12.2, the following status is expected:
+
+* **Dev stage:** The new version is deployed in the dev stage and the functional tests passed.
+ * To verify, open a browser and navigate to [http://carts.sockshop-dev.apps-crc.testing](http://carts.sockshop-dev.apps-crc.testing)
+
+* **Staging stage:** In this stage, version v0.12.2 will be deployed and the performance test starts to run for about 10 minutes. After the test is completed, Keptn triggers the test evaluation and identifies the slowdown. Consequently, a roll-back to version v0.12.1 in this stage is conducted and the promotion to production is not triggered.
+
+
+* **Production stage:** The slow version is **not promoted** to the production stage because of the active quality gate in place. Thus, still version v0.12.1 is expected to be in production.
+ * To verify, navigate to [http://carts.sockshop-production.apps-crc.testing](http://carts.sockshop-production.apps-crc.testing)
+
+
+## Verify the quality gate in Keptn's Bridge
+Duration: 3:00
+
+Take a look at the Keptn's Bridge and navigate to the last deployment. You will find a quality gate evaluation that got a `fail` result when evaluating the SLOs of our carts microservice. Thanks to this quality gate the slow build won't be promoted to production but instead automatically rolled back.
+
+To verify, the [Keptn's Bridge](https://keptn.sh/docs/0.10.x/reference/bridge/) shows the deployment of v0.12.2 and then the failed test in staging including the roll-back.
+
+
+
+
+
+## Deploy a regular carts version
+Duration: 3:00
+
+1. Use the Keptn CLI to send a new version of the *carts* artifact, which does **not** contain any slowdown:
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts --image=docker.io/keptnexamples/carts --tag=0.12.3
+ ```
+
+
+
+1. To verify the deployment in *production* (it may take a couple of minutes), open a browser and navigate to the carts service in your production environment. As a result, you see `Version: v3`.
+
+
+1. Besides, you can verify the deployments in your Kubernetes cluster using the following commands:
+
+
+ ```
+ kubectl get deployments -n sockshop-production
+ ```
+
+ ```
+ NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
+ carts-db 1 1 1 1 63m
+ carts-primary 1 1 1 1 98m
+ ```
+
+
+
+ ```
+ kubectl describe deployment carts-primary -n sockshop-production
+ ```
+
+ ```
+ ...
+ Pod Template:
+ Labels: app=carts-primary
+ Containers:
+ carts:
+ Image: docker.io/keptnexamples/carts:0.12.3
+ ```
+
+1. Take another look into the Keptn's Bridge and you will see this new version passed the quality gate and thus, is now running in production!
diff --git a/site/tutorials/snippets/11/quality-gates/setupQualityGate.md b/site/tutorials/snippets/11/quality-gates/setupQualityGate.md
new file mode 100644
index 00000000..7f47efe2
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gates/setupQualityGate.md
@@ -0,0 +1,187 @@
+
+## Set up the quality gate
+Duration: 4:00
+
+Keptn requires a performance specification for the quality gate. This specification is described in a file called `slo.yaml`, which specifies a Service Level Objective (SLO) that should be met by a service. To learn more about the *slo.yaml* file, go to [Specifications for Site Reliability Engineering with Keptn](https://github.com/keptn/spec/blob/master/service_level_objective.md).
+
+To activate the quality gates for the carts service, navigate to the `examples/onboarding-carts` folder and upload the `slo-quality-gates.yaml` file using the [add-resource](https://keptn.sh/docs/0.10.x/reference/cli/commands/keptn_add-resource/) command:
+
+
+
+```
+keptn add-resource --project=sockshop --stage=staging --service=carts --resource=slo-quality-gates.yaml --resourceUri=slo.yaml
+```
+
+This will add the `slo.yaml` file to your Keptn - which is the declarative definition of a quality gate. Let's take a look at the file contents:
+
+```
+---
+spec_version: "1.0"
+comparison:
+ aggregate_function: "avg"
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ number_of_comparison_results: 1
+filter:
+objectives:
+ - sli: "response_time_p95"
+ key_sli: false
+ pass: # pass if (relative change <= 10% AND absolute value is < 600ms)
+ - criteria:
+ - "<=+10%" # relative values require a prefixed sign (plus or minus)
+ - "<600" # absolute values only require a logical operator
+ warning: # if the response time is below 800ms, the result should be a warning
+ - criteria:
+ - "<=800"
+ weight: 1
+total_score:
+ pass: "90%"
+ warning: "75%"
+```
+
+## Verify current version
+Duration: 3:00
+
+You can take a look at the currently deployed version of our "carts" microservice before we deploy the next build of our microservice.
+
+1. Get the URL for your carts service with the following commands in the respective stages:
+
+
+ ```
+ echo http://carts.sockshop-dev.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+
+ ```
+ echo http://carts.sockshop-staging.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+
+ ```
+ echo http://carts.sockshop-production.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+2. Navigate to `http://carts.sockshop-production.YOUR.DOMAIN` for viewing the carts service in your **production** environment and you should receive an output similar to the following:
+
+
+
+
+## Deploy a slow build version
+Duration: 5:00
+
+
+1. Use the Keptn CLI to deploy a version of the carts service, which contains an artificial **slowdown of 1 second** in each request.
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts --image=docker.io/keptnexamples/carts --tag=0.12.2
+ ```
+
+
+
+1. Verify that the slow build has reached your `dev` and `staging` environments by opening a browser for both environments. This may take 5 to 10 minutes. Get the URLs with these commands:
+
+
+ ```
+ echo http://carts.sockshop-dev.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+
+ ```
+ echo http://carts.sockshop-staging.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+
+
+
+
+
+
+## Quality gate in action
+Duration: 7:00
+
+After triggering the deployment of the carts service in version v0.12.2, the following status is expected:
+
+* **Dev stage:** The new version is deployed in the dev stage and the functional tests passed.
+ * To verify, open a browser and navigate to:
+
+ ```
+ echo http://carts.sockshop-dev.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+* **Staging stage:** In this stage, version v0.12.2 will be deployed and the performance test starts to run for about 10 minutes. After the test is completed, Keptn triggers the test evaluation and identifies the slowdown. Consequently, a roll-back to version v0.12.1 in this stage is conducted and the promotion to production is not triggered.
+
+
+* **Production stage:** The slow version is **not promoted** to the production stage because of the active quality gate in place. Thus, still version v0.12.1 is expected to be in production.
+ * To verify, navigate to:
+
+ ```
+ echo http://carts.sockshop-production.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+## Verify the quality gate in Keptn's Bridge
+Duration: 3:00
+
+Take a look at the Keptn's Bridge and navigate to the last deployment. You will find a quality gate evaluation that got a `fail` result when evaluating the SLOs of our carts microservice. Thanks to this quality gate the slow build won't be promoted to production but instead automatically rolled back.
+
+To verify, the [Keptn's Bridge](https://keptn.sh/docs/0.10.x/reference/bridge/) shows the deployment of v0.12.2 and then the failed test in staging including the roll-back.
+
+
+
+
+
+## Deploy a regular carts version
+Duration: 3:00
+
+1. Use the Keptn CLI to send a new version of the *carts* artifact, which does **not** contain any slowdown:
+
+
+ ```
+ keptn trigger delivery --project=sockshop --service=carts --image=docker.io/keptnexamples/carts --tag=0.12.3
+ ```
+
+
+
+1. To verify the deployment in *production* (it may take a couple of minutes), open a browser and navigate to the carts service in your production environment. As a result, you see `Version: v3`.
+
+
+1. Besides, you can verify the deployments in your Kubernetes cluster using the following commands:
+
+
+ ```
+ kubectl get deployments -n sockshop-production
+ ```
+
+ ```
+ NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
+ carts-db 1 1 1 1 63m
+ carts-primary 1 1 1 1 98m
+ ```
+
+
+
+ ```
+ kubectl describe deployment carts-primary -n sockshop-production
+ ```
+
+ ```
+ ...
+ Pod Template:
+ Labels: app=carts-primary
+ Containers:
+ carts:
+ Image: docker.io/keptnexamples/carts:0.12.3
+ ```
+
+1. Take another look into the Keptn's Bridge and you will see this new version passed the quality gate and thus, is now running in production!
diff --git a/site/tutorials/snippets/11/quality-gates/simplenode/extendQualityGatesWithTestMetrics.md b/site/tutorials/snippets/11/quality-gates/simplenode/extendQualityGatesWithTestMetrics.md
new file mode 100644
index 00000000..c9d14a1f
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gates/simplenode/extendQualityGatesWithTestMetrics.md
@@ -0,0 +1,85 @@
+
+## Extend Quality Gates with Test Step Metrics
+Duration: 5:00
+
+Our quality gates so far are based on 5 basic metrics: throughput, error rate, response time (p50, p90, p95).
+While this is a great start we can do much more!!
+
+Dynatrace gives us the option to extract context information from requests that are executed by test tools. Such context information could be the Test Script Name (load.jmx), Test Scenario Name (fullscenario), or Test Step Name (homepage, echo, invoke, version). This information can be passed by the Test Tool using an HTTP Header that can be analyzed by Dynatrace as requests come in. Here is such an example header:
+```
+X-Dynatrace-Test: LTN=performance_build1;LSN=Test Scenario;TSN=homepage;
+```
+
+The JMeter test file we uploaded - `load.jmx` has already been adjusted so that it sends these HTTP Headers including information such as Test Step Name (TSN) for every of the 4 test steps it executes: homepage, version, api, invoke
+
+If we want to extend our SLIs with metrics such as "Response Time for Invoke", "Response Time for Homepage" or "Number of backend microservice calls for Invoke" ... we need to do two things:
+1. Create Request Attributes that tell Dynatrace to extract these HTTP Header values.
+2. Create Calculated Service Metrics that will give us new metrics split by Test Name.
+
+The following image shows how this all plays together:
+
+
+Good news is that we can fully automate the configuration of Request Attributes and Calculated Service Metrics through the Dynatrace API. We have two scripts that do this for us. Please make sure you navigate into the *examples/simplenodeservice/dynatrace* folder. Here we execute the following scripts:
+```
+./createTestRequestAttributes.sh
+./createTestStepCalculatedMetrics.sh CONTEXTLESS keptn_project
+```
+
+The first script will create but not overwrite the Request Attribute rules for TSN (Test Step Name), LTN (Load Test Name) & LSN (Load Script Name).
+The second script will create but not overwrite the following Calculated Service Metrics:
+
+
+| Name | MetricId |
+| --- | --------- |
+| Test Step Response Time | calc:service.teststepresponsetime |
+| Test Step Service Calls | calc:service.teststepservicecalls |
+| Test Step DB Calls | calc:service.teststepdbcalls |
+| Test Step Failure Rate | calc:service.teststepfailurerate |
+| Test Requests by HTTP Status | calc:service.testrequestsbyhttpstatus |
+| Test Step CPU | calc:service.teststepcpu |
+| Test Step DB Calls | calc:service.teststepdbcalls |
+
+From now on - every time Keptn executes these JMeter tests we will have new metrics available that provide a data dimension for each Test Step Name.
+
+This also allows us to extend our SLIs with these metric definitions. In our examples we therefore have a `sli_perftest.yaml` and also a `slo_perftest.yaml` that include these new metrics.
+Make sure you navigate to the *examples/simplenodeservice/keptn* directory. Now:
+
+1. First, lets upload our `dynatrace/sli_perftest.yaml` as `dynatrace/sli.yaml` for staging!
+
+We could upload these new sli files with the extended indicators to both staging and production. But - in order to show you that we can have different SLIs and SLOs in each stage we just upload it to staging.
+
+```
+keptn add-resource --project=simplenodeproject --stage=staging --service=simplenode --resource=dynatrace/sli_perftest.yaml --resourceUri=dynatrace/sli.yaml
+```
+
+Please explore the sli_perftest.yaml file yourself to see the new queries. For reference here are two of the queries that show you how the Dynatrace Metrics API allows us to query calculated service metrics for individual dimensions (e.g: Test Name):
+
+```
+ rt_test_version: "metricSelector=calc:service.teststepresponsetime:filter(eq(Test Step,version)):merge(\"dt.entity.service\"):avg&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+ rt_test_homepage: "metricSelector=calc:service.teststepresponsetime:filter(eq(Test Step,homepage)):merge(\"dt.entity.service\"):avg&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+```
+
+2. Second, lets upload our `slo_perftest.yaml` as `slo.yaml`
+
+Same as with the SLI. We just upload it to the staging as this file now defines objectives for the new indicators defined in the SLI.
+
+```
+keptn add-resource --project=simplenodeproject --stage=staging --service=simplenode --resource=slo_perftest.yaml --resourceUri=slo.yaml
+```
+
+## Deploy build with extended Test Step Metrics Quality Gate
+Duration: 5:00
+
+Let's go back to build 1.0.0 and deploy it again. What we should see is that Keptn will query all these additional test step specific metrics for the quality gate evaluation in staging.
+
+1. Lets deploy build number 1.0.0 again
+
+```
+keptn send event new-artifact --project=simplenodeproject --service=simplenode --image=grabnerandi/simplenodeservice --tag=1.0.0
+```
+
+2. Lets validate quality gate in bridge:
+
+What you should see are all these new SLIs showing up in the bridge!
+
+
\ No newline at end of file
diff --git a/site/tutorials/snippets/11/quality-gates/simplenode/setupBasicQualityGate.md b/site/tutorials/snippets/11/quality-gates/simplenode/setupBasicQualityGate.md
new file mode 100644
index 00000000..92a5286b
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gates/simplenode/setupBasicQualityGate.md
@@ -0,0 +1,121 @@
+
+## Set up a basic quality gate
+Duration: 4:00
+
+Keptn Quality Gates are based on the concepts of
+* SLIs (Service Level Indicators): what metrics (=indicators) are important and how do we query them
+* SLOs (Service Level Objectives): what conditions (=objectives) must be met to consider this a good or a bad value per indicator
+
+In Keptn we therefore need to provide an `sli.yaml` that defines how to query certain metrics from a specific tool, e.g: Dynatrace. We also need to provide an `slo.yaml` that defines the conditions - this file is tool independent.
+To learn more about the *sli.yaml* and *slo.yaml* files, go to [Specifications for Site Reliability Engineering with Keptn](https://github.com/keptn/spec/blob/0.1.3/sre.md).
+
+Our example comes with a basic and an extended set of SLIs and SLOs. In this step we focus on the basic version.
+We have to upload two files using the [add-resource](https://keptn.sh/docs/0.10.x/reference/cli/#keptn-add-resource) command.
+Ensure you navigate to the `examples/simplenode/keptn` folder.
+
+1. First, lets upload our `dynatrace/sli_basic.yaml` as `dynatrace/sli.yaml`!
+
+```
+keptn add-resource --project=simplenodeproject --stage=staging --service=simplenode --resource=dynatrace/sli_basic.yaml --resourceUri=dynatrace/sli.yaml
+```
+
+This Dynatrace specific SLI contains the definition of 5 indicators. Each indicator has a logical name, e.g: throughput and the tool specific query, e.g: Dynatrace Metrics Query. You can also see that the query definition can leverage placeholders such as $PROJECT, $SERVICE, $STAGE ... - this is great as we can use them to filter on exactly those services managed by Keptn as long as these tags are put on the Dynatrace entities:
+```
+---
+spec_version: '1.0'
+indicators:
+ throughput: "metricSelector=builtin:service.requestCount.total:merge(\"dt.entity.service\"):sum&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+ error_rate: "metricSelector=builtin:service.errors.total.rate:merge(\"dt.entity.service\"):avg&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+ response_time_p50: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(50)&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+ response_time_p90: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(90)&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+ response_time_p95: "metricSelector=builtin:service.response.time:merge(\"dt.entity.service\"):percentile(95)&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+```
+
+2. Second, lets upload our `slo_basic.yaml` as `slo.yaml`
+
+```
+keptn add-resource --project=simplenodeproject --stage=staging --service=simplenode --resource=slo_basic.yaml --resourceUri=slo.yaml
+```
+
+This `slo.yaml` defines the objectives and references the SLIs defined in the `sli.yaml`:
+
+```
+---
+spec_version: '0.1.0'
+comparison:
+ compare_with: "single_result"
+ include_result_with_score: "pass"
+ aggregate_function: avg
+objectives:
+ - sli: response_time_p95
+    pass: # pass if (relative change <= 10% AND absolute value is < 600ms)
+ - criteria:
+ - "<=+10%" # relative values require a prefixed sign (plus or minus)
+ - "<600" # absolute values only require a logical operator
+ warning: # if the response time is below 800ms, the result should be a warning
+ - criteria:
+ - "<=800"
+ - sli: throughput
+ pass:
+ - criteria:
+ - ">4000"
+ - sli: error_rate
+ weight: 2
+ pass:
+ - criteria:
+ - "<=1%"
+ warning:
+ - criteria:
+ - "<=2%"
+ - sli: response_time_p50
+ - sli: response_time_p90
+ pass:
+ - criteria:
+ - "<=+10%"
+ warning:
+ - criteria:
+ - "<=+10%"
+total_score:
+ pass: "90%"
+ warning: "75%"
+```
+
+## Adding Basic Tests for Quality Gate
+Duration: 3:00
+
+Uploading SLIs & SLOs alone is not enough. What we need are some tests, e.g: simple API performance tests that get executed by Keptn. After those tests are executed Keptn will evaluate the SLIs/SLOs for the timeframe of the test execution.
+
+Keptn comes with a JMeter-Service that can execute JMeter tests when a new deployment happened. In our tutorial we are however using the JMeter-Extended-Service as it gives us some more flexibility with different workloads.
+1. We simply "upgrade" from JMeter-Service to JMeter-Extended-Service by replacing the image:
+
+```
+kubectl -n keptn set image deployment/jmeter-service jmeter-service=keptncontrib/jmeter-extended-service:0.1.0
+```
+
+Now we are ready to upload a test script and workload configuration for our staging stage. Ensure you navigate to the `examples/simplenode/keptn` folder.
+2. Add load test script & workload config to our staging stage
+```
+keptn add-resource --project=simplenodeproject --stage=staging --service=simplenode --resource=jmeter/load.jmx --resourceUri=jmeter/load.jmx
+```
+
+```
+keptn add-resource --project=simplenodeproject --stage=staging --service=simplenode --resource=jmeter/jmeter.conf.yaml --resourceUri=jmeter/jmeter.conf.yaml
+```
+
+## Deploy first build with Tests & Quality Gates
+Duration: 5:00
+
+As we have now uploaded tests, SLIs & SLOs we can run the same artifact of version 1.0.0 through the delivery pipeline. The difference now is that Keptn will automatically execute tests in staging and then evaluates our indicators (specified in SLI.yaml) against our objectives (specified in SLO.yaml) for the timeframe of the test execution.
+
+1. Lets deploy build number 1.0.0 again
+
+```
+keptn send event new-artifact --project=simplenodeproject --service=simplenode --image=grabnerandi/simplenodeservice --tag=1.0.0
+```
+
+2. Lets validate quality gate in bridge:
+
+Remember - this time the deployment will take a bit longer as the tests take about 2-3 minutes to run before Keptn can pull in metrics from Dynatrace. Overall a deployment will now take about 5 minutes. Go back to the Keptn's Bridge and watch for the new events coming in. In a couple of minutes you will also see the evaluation results of your Quality Gate. Lets hope all is green and the build makes it all the way into production :-)
+
+
+
diff --git a/site/tutorials/snippets/11/quality-gates/simplenode/setupQualityGateInProd.md b/site/tutorials/snippets/11/quality-gates/simplenode/setupQualityGateInProd.md
new file mode 100644
index 00000000..c1f4210b
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gates/simplenode/setupQualityGateInProd.md
@@ -0,0 +1,41 @@
+
+## Set up a quality gate in production
+Duration: 1:00
+
+So far we have uploaded our test script, test workload and our SLI & SLO for our staging stage.
+If we also want a quality gate to be enforced after a blue/green deployment to production - to validate whether the production deployment is good enough or whether the blue/green deployment should be reverted - we have to add the SLI.yaml, SLO.yaml, and our tests for production as well.
+
+1. First, lets upload our `dynatrace/sli_basic.yaml` as `dynatrace/sli.yaml` for prod!
+
+We could upload a different sli.yaml for production than the one we have for staging. In a real scenario you probably want this as you may want to include additional indicators from other parts of the infrastructure that you didn't have available in staging. For our sample we just use the same `sli_basic.yaml`!
+
+```
+keptn add-resource --project=simplenodeproject --stage=prod --service=simplenode --resource=dynatrace/sli_basic.yaml --resourceUri=dynatrace/sli.yaml
+```
+
+If you wonder - how can the same SLI be working in production? Well - it's because the SLI is leveraging placeholders such as $STAGE. Once Keptn evaluates the SLIs for production, this value will be replaced with `prod`. And - as long as all services are correctly tagged in Dynatrace with e.g: `keptn_stage:prod` we are all good.
+
+Here is one of the indicator definitions of this SLI file so you see what I mean:
+
+```
+indicators:
+ throughput: "metricSelector=builtin:service.requestCount.total:merge(\"dt.entity.service\"):sum&entitySelector=tag(keptn_project:$PROJECT),tag(keptn_stage:$STAGE),tag(keptn_service:$SERVICE),tag(keptn_deployment:$DEPLOYMENT),type(SERVICE)"
+```
+
+2. Second, lets upload our `slo_basic.yaml` as `slo.yaml`
+
+Same as with the SLI. We could upload a different SLO that includes different objectives for production, e.g: you may expect different load behavior or you have different hardware your system runs on. In that case you would adjust the SLOs to reflect what you expect in production. For our sample we just take the same SLO that we used for staging
+
+```
+keptn add-resource --project=simplenodeproject --stage=prod --service=simplenode --resource=slo_basic.yaml --resourceUri=slo.yaml
+```
+
+3. Third, lets upload our tests
+
+In order for the quality gates to evaluate a representative timeframe with representative load we will upload tests to production. This will make sure that after Keptn deploys the artifact in production that these tests get executed against the new Blue/Green deployment. After that the quality gate kicks in. If the validation succeeds Keptn will keep the new build - otherwise it will roll back.
+We will use the same test scripts as in staging. We could use different tests - but - for our example that's good enough!
+
+```
+keptn add-resource --project=simplenodeproject --stage=prod --service=simplenode --resource=jmeter/load.jmx --resourceUri=jmeter/load.jmx
+keptn add-resource --project=simplenodeproject --stage=prod --service=simplenode --resource=jmeter/jmeter.conf.yaml --resourceUri=jmeter/jmeter.conf.yaml
+```
diff --git a/site/tutorials/snippets/11/quality-gates/simplenode/validateQualityGatesWithMultipleDeployments.md b/site/tutorials/snippets/11/quality-gates/simplenode/validateQualityGatesWithMultipleDeployments.md
new file mode 100644
index 00000000..14c4c74a
--- /dev/null
+++ b/site/tutorials/snippets/11/quality-gates/simplenode/validateQualityGatesWithMultipleDeployments.md
@@ -0,0 +1,33 @@
+## Run more builds - validate all quality gates
+Duration: 5:00
+
+In the last steps we finished setting up tests and quality gates for both staging and production.
+Now it's time to put this to the test. If you remember - the simplenodeservice app comes with 4 different builds. Every build has a unique characteristic, e.g: some builds are good all the way to production, some builds have a high failure rate and should be stopped by the staging quality gate, some builds are only problematic in production and should therefore be rolled back during a blue/green validation phase.
+
+Here is what we are going to do in this step. We are going to deploy build 2, 3 and then 4 and validate if Keptn catches all problems as highlighted in the next image:
+
+
+1. Let's deploy build 2.0.0
+
+```
+keptn send event new-artifact --project=simplenodeproject --service=simplenode --image=grabnerandi/simplenodeservice --tag=2.0.0
+```
+
+Watch the bridge and see if build 2.0.0 is stopped by the quality gate. It should - as build 2.0.0 has a high failure rate which is detected by the SLI error_rate!
+
+2. Let's deploy build 3.0.0
+
+```
+keptn send event new-artifact --project=simplenodeproject --service=simplenode --image=grabnerandi/simplenodeservice --tag=3.0.0
+```
+
+Watch the bridge and see if build 3.0.0 makes it all the way to production and stays there. It should - as build 3.0.0 has no high failure rate any longer and also doesn't show any other signs of problems. As we have production quality gates enabled as well you should also see tests being executed in production followed by quality gate evaluation.
+
+3. Let's deploy build 4.0.0
+
+```
+keptn send event new-artifact --project=simplenodeproject --service=simplenode --image=grabnerandi/simplenodeservice --tag=4.0.0
+```
+
+Watch the bridge and see if build 4.0.0 makes it all the way to production and is then rejected and rolled back to build 3.0.0. Build 4 should pass the quality gate in staging as the problem that is built into 4.0.0 only shows up in production. This is why you should see the build being promoted in production. But - after the tests are executed and evaluation fails Keptn will automatically roll it back to Build 3 in production. You can also validate this by browsing to your app
+
diff --git a/site/tutorials/snippets/11/self-healing/featureFlagsDynatrace-crc.md b/site/tutorials/snippets/11/self-healing/featureFlagsDynatrace-crc.md
new file mode 100644
index 00000000..c09a5886
--- /dev/null
+++ b/site/tutorials/snippets/11/self-healing/featureFlagsDynatrace-crc.md
@@ -0,0 +1,223 @@
+## Self-healing with feature flags
+Duration: 2:00
+
+Next, you will learn how to use the capabilities of Keptn to provide self-healing for an application with feature flags based on the [Unleash feature toggle framework](https://unleash.github.io/).
+
+
+Positive
+: For the sake of this tutorial, we will create an Unleash Keptn project. The carts microservice is already pre-configured for this.
+
+To quickly get an Unleash server up and running with Keptn, follow these instructions:
+
+1. Make sure you are in the correct folder of your examples directory:
+
+
+
+
+ ```
+ cd examples/unleash-server
+ ```
+
+
+1. Create a new project using the `keptn create project` command:
+
+
+ ```
+ keptn create project unleash --shipyard=./shipyard.yaml
+ ```
+
+1. Create an unleash and unleash-db service using the `keptn create service` and `keptn add-resource` commands:
+
+
+ ```
+ keptn create service unleash-db --project=unleash
+ keptn add-resource --project=unleash --service=unleash-db --all-stages --resource=./unleash-db.tgz --resourceUri=helm/unleash-db.tgz
+
+ keptn create service unleash --project=unleash
+ keptn add-resource --project=unleash --service=unleash --all-stages --resource=./unleash.tgz --resourceUri=helm/unleash.tgz
+ ```
+
+1. Send new artifacts for unleash and unleash-db using the `keptn trigger delivery` command:
+
+
+ ```
+ keptn trigger delivery --project=unleash --service=unleash-db --image=postgres:10.4
+ keptn trigger delivery --project=unleash --service=unleash --image=docker.io/keptnexamples/unleash:1.0.0
+ ```
+
+
+
+1. Go to the URL (`unleash.unleash-dev.KEPTN_DOMAIN`): [http://unleash.unleash-dev.apps-crc.testing](http://unleash.unleash-dev.apps-crc.testing)
+
+1. Open the URL in your browser and log in using the following credentials:
+ * Username: keptn
+ * Password: keptn
+
+You should be able to login using the credentials *keptn/keptn*.
+
+## Configure the Unleash server
+Duration: 4:00
+
+In this tutorial, we are going to introduce feature toggles for two scenarios:
+
+1. Feature flag for a very simple caching mechanism that can speed up the delivery of the website since it skips the calls to the database but instead replies with static content.
+
+1. Feature flag for a promotional campaign that can be enabled whenever you want to run a promotional campaign on top of your shopping cart.
+
+To set up both feature flags, please use the following scripts to automatically generate the feature flags that we need in this tutorial.
+
+
+
+
+```
+export UNLEASH_TOKEN=$(echo -n keptn:keptn | base64)
+export UNLEASH_BASE_URL=$(echo http://unleash.unleash-dev.apps-crc.testing)
+
+curl --request POST \
+ --url ${UNLEASH_BASE_URL}/api/admin/features/ \
+ --header "authorization: Basic ${UNLEASH_TOKEN}" \
+ --header 'content-type: application/json' \
+ --data '{
+ "name": "EnableItemCache",
+ "description": "carts",
+ "enabled": false,
+ "strategies": [
+ {
+ "name": "default",
+ "parameters": {}
+ }
+ ]
+}'
+
+curl --request POST \
+ --url ${UNLEASH_BASE_URL}/api/admin/features/ \
+ --header "authorization: Basic ${UNLEASH_TOKEN}" \
+ --header 'content-type: application/json' \
+ --data '{
+ "name": "EnablePromotion",
+ "description": "carts",
+ "enabled": false,
+ "strategies": [
+ {
+ "name": "default",
+ "parameters": {}
+ }
+ ]
+}'
+```
+
+### Optionally verify the generated feature flags
+
+If you want to verify the feature flags that have been created, please login to your Unleash server - or if you are already logged in - refresh the browser.
+
+
+
+
+## Configure Keptn
+Duration: 5:00
+
+Now, everything is set up in the Unleash server. For Keptn to be able to connect to the Unleash server, we have to add a secret with the Unleash API URL as well as the Unleash tokens.
+
+1. In order for Keptn to be able to use the Unleash API, we need to add the credentials as a secret to our Keptn namespace. In this tutorial, we do not have to change the values for UNLEASH_SERVER, UNLEASH_USER, and UNLEASH_TOKEN, but in your own custom scenario you might need to change them to your actual Unleash URL, user, and token.
+As said, in this tutorial we can use the following command as it is:
+
+
+ ```
+ kubectl -n keptn create secret generic unleash --from-literal="UNLEASH_SERVER_URL=http://unleash.unleash-dev/api" --from-literal="UNLEASH_USER=keptn" --from-literal="UNLEASH_TOKEN=keptn"
+ ```
+
+1. Install the Unleash action provider which is responsible for acting upon an alert, thus it is the part that will actually resolve issues by changing the stage of the feature flags.
+
+
+ ```
+ kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/unleash-service/release-0.3.2/deploy/service.yaml
+ ```
+
+1. Switch to the carts example (`cd examples/onboarding-carts`) and add the following remediation instructions
+
+ ```
+ apiVersion: spec.keptn.sh/0.1.4
+ kind: Remediation
+ metadata:
+ name: carts-remediation
+ spec:
+ remediations:
+ - problemType: Response time degradation
+ actionsOnOpen:
+ - action: toggle-feature
+ name: Toogle feature flag
+ description: Toogle feature flag EnableItemCache to ON
+ value:
+ EnableItemCache: "on"
+ - problemType: Failure rate increase
+ actionsOnOpen:
+ - action: toggle-feature
+ name: Toogle feature flag
+ description: Toogle feature flag EnablePromotion to OFF
+ value:
+ EnablePromotion: "off"
+ ```
+
+ using the following command. Please make sure you are in the correct folder `examples/onboarding-carts`.
+
+
+
+
+ ```
+ keptn add-resource --project=sockshop --service=carts --stage=production --resource=remediation_feature_toggle.yaml --resourceUri=remediation.yaml
+ ```
+
+ **Note:** The file describes remediation actions (e.g., `featuretoggle`) in response to problems/alerts (e.g., `Response time degradation`) that are sent to Keptn.
+
+1. We are also going to add an SLO file so that Keptn can evaluate if the remediation action was successful.
+
+ ```
+ keptn add-resource --project=sockshop --stage=production --service=carts --resource=slo-self-healing-dynatrace.yaml --resourceUri=slo.yaml
+ ```
+
+1. Start the load generation script for this use case:
+
+ ```
+ kubectl apply -f ../load-generation/cartsloadgen/deploy/cartsloadgen-prod.yaml
+ ```
+
+Positive
+: Please note that in a production environment you would have to set it up differently. We have deployed Unleash only in a single-stage environment while we have our application that we manage with Unleash in a multi-stage environment. This was done for the sake of resource-saving.
+That means, in our example, the Unleash server will control the behavior for all three stages (this is what you would probably not want in a production environment). Therefore, we started the load-generation only for the production stage to not impact the other stages.
+
+Now that everything is set up and we hit it with some load, next we are going to toggle the feature flags.
+
+
+## Run the experiment
+Duration: 5:00
+
+1. In this tutorial, we are going to turn on the promotional campaign, whose purpose is to add promotional gifts to about 30 % of the user interactions that put items in their shopping cart.
+
+1. Click on the toggle next to **EnablePromotion** to enable this feature flag.
+
+
+
+ 
+
+1. By enabling this feature flag, a not implemented function is called resulting in a *NotImplementedFunction* error in the source code and a failed response. After a couple of minutes, the monitoring tool will detect an increase in the failure rate and will send out a problem notification to Keptn.
+
+1. Keptn will receive the problem notification/alert and look for a remediation action that matches this problem. Since we have added the `remediation.yaml` before, Keptn will find a remediation action and will trigger the corresponding action by reaching out to the action provider that will disable the feature flag.
+
+1. Finally, take a look into the Keptn's Bridge to see that an open problem has been resolved. You might notice that also the other stages like _dev_, and _staging_ received the error. The reason is that they all receive the same feature flag configuration and all receive traffic from the load generator. However, for _dev_ and _staging_ there is no `remediation.yaml` added and thus, no remediation will be performed if problems in these stages are detected. If you want to change this behaviour, go ahead and also add the `remediation.yaml` file to the other stages by executing another `keptn add-resource` command. For this tutorial, we are fine with only having self-healing for our production stage!
+
+ 
+
+1. 10 minutes after Keptn disables the feature flag, Keptn will also trigger another evaluation to make sure the triggered remediation action did actually resolve the problem. In case the problem is not resolved and the remediation file would hold more remediation actions, Keptn would go ahead and trigger them. For our tutorial Keptn has resolved the issue already, so no need for a second try!
+
+
diff --git a/site/tutorials/snippets/11/self-healing/featureFlagsDynatrace.md b/site/tutorials/snippets/11/self-healing/featureFlagsDynatrace.md
new file mode 100644
index 00000000..dc82045d
--- /dev/null
+++ b/site/tutorials/snippets/11/self-healing/featureFlagsDynatrace.md
@@ -0,0 +1,227 @@
+## Self-healing with feature flags
+Duration: 2:00
+
+Next, you will learn how to use the capabilities of Keptn to provide self-healing for an application with feature flags based on the [Unleash feature toggle framework](https://unleash.github.io/).
+
+
+Positive
+: For the sake of this tutorial, we will create an Unleash Keptn project. The carts microservice is already pre-configured for this.
+
+To quickly get an Unleash server up and running with Keptn, follow these instructions:
+
+1. Make sure you are in the correct folder of your examples directory:
+
+
+
+
+ ```
+ cd examples/unleash-server
+ ```
+
+
+1. Create a new project using the `keptn create project` command:
+
+
+ ```
+ keptn create project unleash --shipyard=./shipyard.yaml
+ ```
+
+1. Create an unleash and unleash-db service using the `keptn create service` and `keptn add-resource` commands:
+
+
+ ```
+ keptn create service unleash-db --project=unleash
+ keptn add-resource --project=unleash --service=unleash-db --all-stages --resource=./unleash-db.tgz --resourceUri=helm/unleash-db.tgz
+
+ keptn create service unleash --project=unleash
+ keptn add-resource --project=unleash --service=unleash --all-stages --resource=./unleash.tgz --resourceUri=helm/unleash.tgz
+ ```
+
+1. Send new artifacts for unleash and unleash-db using the `keptn trigger delivery` command:
+
+
+ ```
+ keptn trigger delivery --project=unleash --service=unleash-db --image=postgres:10.4
+ keptn trigger delivery --project=unleash --service=unleash --image=docker.io/keptnexamples/unleash:1.0.0
+ ```
+
+
+
+1. Get the URL (`unleash.unleash-dev.KEPTN_DOMAIN`):
+
+
+ ```
+ echo http://unleash.unleash-dev.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}')
+ ```
+
+1. Open the URL in your browser and log in using the following credentials:
+ * Username: keptn
+ * Password: keptn
+
+You should be able to login using the credentials *keptn/keptn*.
+
+## Configure the Unleash server
+Duration: 4:00
+
+In this tutorial, we are going to introduce feature toggles for two scenarios:
+
+1. Feature flag for a very simple caching mechanism that can speed up the delivery of the website since it skips the calls to the database but instead replies with static content.
+
+1. Feature flag for a promotional campaign that can be enabled whenever you want to run a promotional campaign on top of your shopping cart.
+
+To set up both feature flags, please use the following scripts to automatically generate the feature flags that we need in this tutorial.
+
+
+
+
+```
+UNLEASH_TOKEN=$(echo -n keptn:keptn | base64)
+UNLEASH_BASE_URL=$(echo http://unleash.unleash-dev.$(kubectl -n keptn get ingress api-keptn-ingress -ojsonpath='{.spec.rules[0].host}'))
+
+curl --request POST \
+ --url ${UNLEASH_BASE_URL}/api/admin/features/ \
+ --header "authorization: Basic ${UNLEASH_TOKEN}" \
+ --header 'content-type: application/json' \
+ --data '{
+ "name": "EnableItemCache",
+ "description": "carts",
+ "enabled": false,
+ "strategies": [
+ {
+ "name": "default",
+ "parameters": {}
+ }
+ ]
+}'
+
+curl --request POST \
+ --url ${UNLEASH_BASE_URL}/api/admin/features/ \
+ --header "authorization: Basic ${UNLEASH_TOKEN}" \
+ --header 'content-type: application/json' \
+ --data '{
+ "name": "EnablePromotion",
+ "description": "carts",
+ "enabled": false,
+ "strategies": [
+ {
+ "name": "default",
+ "parameters": {}
+ }
+ ]
+}'
+```
+
+### Optionally verify the generated feature flags
+
+If you want to verify the feature flags that have been created, please login to your Unleash server - or if you are already logged in - refresh the browser.
+
+
+
+
+## Configure Keptn
+Duration: 5:00
+
+Now, everything is set up in the Unleash server. For Keptn to be able to connect to the Unleash server, we have to add a secret with the Unleash API URL as well as the Unleash tokens.
+
+1. In order for Keptn to be able to use the Unleash API, we need to add the credentials as a secret to our Keptn namespace. In this tutorial, we do not have to change the values for UNLEASH_SERVER, UNLEASH_USER, and UNLEASH_TOKEN, but in your own custom scenario you might need to change them to your actual Unleash URL, user, and token.
+As said, in this tutorial we can use the following command as it is:
+
+
+ ```
+ kubectl -n keptn create secret generic unleash --from-literal="UNLEASH_SERVER_URL=http://unleash.unleash-dev/api" --from-literal="UNLEASH_USER=keptn" --from-literal="UNLEASH_TOKEN=keptn"
+ ```
+
+1. Install the Unleash action provider which is responsible for acting upon an alert, thus it is the part that will actually resolve issues by changing the stage of the feature flags.
+
+
+ ```
+ kubectl apply -f https://raw.githubusercontent.com/keptn-contrib/unleash-service/release-0.3.2/deploy/service.yaml -n keptn
+ ```
+
+1. Switch to the carts example (`cd examples/onboarding-carts`) and add the following remediation instructions
+
+ ```
+ apiVersion: spec.keptn.sh/0.1.4
+ kind: Remediation
+ metadata:
+ name: carts-remediation
+ spec:
+ remediations:
+ - problemType: Response time degradation
+ actionsOnOpen:
+ - action: toggle-feature
+ name: Toogle feature flag
+ description: Toogle feature flag EnableItemCache to ON
+ value:
+ EnableItemCache: "on"
+ - problemType: Failure rate increase
+ actionsOnOpen:
+ - action: toggle-feature
+ name: Toogle feature flag
+ description: Toogle feature flag EnablePromotion to OFF
+ value:
+ EnablePromotion: "off"
+ ```
+
+ using the following command. Please make sure you are in the correct folder `examples/onboarding-carts`.
+
+
+
+
+ ```
+ keptn add-resource --project=sockshop --service=carts --stage=production --resource=remediation_feature_toggle.yaml --resourceUri=remediation.yaml
+ ```
+
+ **Note:** The file describes remediation actions (e.g., `featuretoggle`) in response to problems/alerts (e.g., `Response time degradation`) that are sent to Keptn.
+
+1. We are also going to add an SLO file so that Keptn can evaluate if the remediation action was successful.
+
+ ```
+ keptn add-resource --project=sockshop --stage=production --service=carts --resource=slo-self-healing-dynatrace.yaml --resourceUri=slo.yaml
+ ```
+
+1. Start the load generation script for this use case:
+
+ ```
+ kubectl apply -f ../load-generation/cartsloadgen/deploy/cartsloadgen-prod.yaml
+ ```
+
+Positive
+: Please note that in a production environment you would have to set it up differently. We have deployed Unleash only in a single-stage environment while we have our application that we manage with Unleash in a multi-stage environment. This was done for the sake of resource-saving.
+That means, in our example, the Unleash server will control the behavior for all three stages (this is what you would probably not want in a production environment). Therefore, we started the load-generation only for the production stage to not impact the other stages.
+
+Now that everything is set up and we hit it with some load, next we are going to toggle the feature flags.
+
+## Run the experiment
+Duration: 5:00
+
+1. In this tutorial, we are going to turn on the promotional campaign, whose purpose is to add promotional gifts to about 30 % of the user interactions that put items in their shopping cart.
+
+1. Click on the toggle next to **EnablePromotion** to enable this feature flag.
+
+
+
+ 
+
+1. By enabling this feature flag, a not implemented function is called resulting in a *NotImplementedFunction* error in the source code and a failed response. After a couple of minutes, the monitoring tool will detect an increase in the failure rate and will send out a problem notification to Keptn.
+
+1. Keptn will receive the problem notification/alert and look for a remediation action that matches this problem. Since we have added the `remediation.yaml` before, Keptn will find a remediation action and will trigger the corresponding action by reaching out to the action provider that will disable the feature flag.
+
+1. Finally, take a look into the Keptn's Bridge to see that an open problem has been resolved. You might notice that also the other stages like _dev_, and _staging_ received the error. The reason is that they all receive the same feature flag configuration and all receive traffic from the load generator. However, for _dev_ and _staging_ there is no `remediation.yaml` added and thus, no remediation will be performed if problems in these stages are detected. If you want to change this behaviour, go ahead and also add the `remediation.yaml` file to the other stages by executing another `keptn add-resource` command. For this tutorial, we are fine with only having self-healing for our production stage!
+
+ 
+
+1. 10 minutes after Keptn disables the feature flag, Keptn will also trigger another evaluation to make sure the triggered remediation action did actually resolve the problem. In case the problem is not resolved and the remediation file would hold more remediation actions, Keptn would go ahead and trigger them. For our tutorial Keptn has resolved the issue already, so no need for a second try!
+
+
diff --git a/site/tutorials/snippets/11/self-healing/upscalePrometheus.md b/site/tutorials/snippets/11/self-healing/upscalePrometheus.md
new file mode 100644
index 00000000..051e7ffe
--- /dev/null
+++ b/site/tutorials/snippets/11/self-healing/upscalePrometheus.md
@@ -0,0 +1,191 @@
+
+
+## Enable Self-Healing
+Duration: 2:00
+
+Next, you will learn how to use the capabilities of Keptn to provide self-healing for an application without modifying code. In the next part, we configure Keptn to scale up the pods of an application if the application undergoes heavy CPU saturation.
+
+Negative
+: First, make sure you are in the correct folder `examples/onboarding-carts` otherwise the next commands will fail.
+
+For this usecase, we have prepared another version of the SLI file. Add it with the following command:
+
+```
+keptn add-resource --project=sockshop --stage=production --service=carts --resource=sli-config-prometheus-selfhealing.yaml --resourceUri=prometheus/sli.yaml
+```
+
+Add the prepared SLO file for self-healing to the production stage using the Keptn CLIs add-resource command:
+
+
+```
+keptn add-resource --project=sockshop --stage=production --service=carts --resource=slo-self-healing-prometheus.yaml --resourceUri=slo.yaml
+```
+
+Note: The SLO file contains an objective for response_time_p90.
+
+
+Configure Prometheus with the Keptn CLI (this configures the [Alert Manager](https://prometheus.io/docs/alerting/configuration/) based on the slo.yaml file):
+
+
+```
+keptn configure monitoring prometheus --project=sockshop --service=carts
+```
+
+Configure remediation actions for up-scaling based on Prometheus alerts:
+
+
+```
+keptn add-resource --project=sockshop --stage=production --service=carts --resource=remediation.yaml --resourceUri=remediation.yaml
+```
+
+This is the content of the file that has been added:
+
+```
+apiVersion: spec.keptn.sh/0.1.4
+kind: Remediation
+metadata:
+ name: carts-remediation
+spec:
+ remediations:
+ - problemType: Response time degradation
+ actionsOnOpen:
+ - action: scaling
+ name: scaling
+ description: Scale up
+ value: 1
+ - problemType: response_time_p90
+ actionsOnOpen:
+ - action: scaling
+ name: scaling
+ description: Scale up
+ value: 1
+```
+
+## Generate load for the service
+Duration: 3:00
+
+To simulate user traffic that is causing an unhealthy behavior in the carts service, please execute the following script. This will add special items into the shopping cart that cause some extensive calculation.
+
+1. Move to the correct folder for the load generation scripts:
+
+
+ ```
+ cd ../load-generation/cartsloadgen/deploy
+ ```
+
+1. Start the load generation script:
+
+
+ ```
+ kubectl apply -f cartsloadgen-faulty.yaml
+ ```
+
+1. (optional:) Verify the load in Prometheus.
+ - Make a port forward to access Prometheus:
+
+ ```
+ kubectl port-forward svc/prometheus-server 8080:80 -n monitoring
+ ```
+
+ - Access Prometheus from your browser on [http://localhost:8080](http://localhost:8080).
+
+ - In the **Graph** tab, add the expression
+
+ ```
+ histogram_quantile(0.9, sum by(le) (rate(http_response_time_milliseconds_bucket{job="carts-sockshop-production-primary"}[3m])))
+ ```
+
+ - Select the **Graph** tab to see your Response time metrics of the `carts` service in the `sockshop-production` environment.
+
+   - You should see a graph which looks similar to this:
+
+ 
+
+
+## Watch self-healing in action
+Duration: 10:00
+
+After approximately 10-15 minutes, the *Alert Manager* will send out an alert since the *service level objective* is not met anymore.
+
+To verify that an alert was fired, select the *Alerts* view where you should see that the alert `response_time_p90` is in the `firing` state:
+
+ 
+
+
+After receiving the alert notification, the *prometheus-service* will translate it into a Keptn CloudEvent. This event will eventually be received by the *remediation-service* that will look for a remediation action specified for this type of problem and, if found, execute it.
+
+In this tutorial, the number of pods will be increased to remediate the issue of the response time increase.
+
+1. (time-saving option) Instead of waiting 15 minutes for the *Alert Manager* to fire an alert, you can manually send the trigger of a remediation sequence by executing the following commands:
+
+ ```
+ echo -e "{\"type\": \"sh.keptn.event.production.remediation.triggered\",\"specversion\":\"1.0\",\"source\":\"https:\/\/github.com\/keptn\/keptn\/prometheus-service\",\"id\": \"f2b878d3-03c0-4e8f-bc3f-454bc1b3d79d\", \"time\": \"2019-06-07T07:02:15.64489Z\", \"contenttype\": \"application\/json\", \"data\": {\"project\": \"sockshop\",\"stage\": \"production\",\"service\": \"carts\",\"problem\": { \"problemTitle\": \"response_time_p90\",\"rootCause\": \"Response time degradation\"}}}" > remediation_trigger.json | keptn send event -f remediation_trigger.json
+ ```
+
+    This command sends the following event to Keptn:
+
+ ```
+ {
+ "type": "sh.keptn.event.production.remediation.triggered",
+ "specversion": "1.0",
+ "source": "https://github.com/keptn/keptn/prometheus-service",
+ "id": "f2b878d3-03c0-4e8f-bc3f-454bc1b3d79d",
+ "time": "2019-06-07T07:02:15.64489Z",
+ "contenttype": "application/json",
+ "data": {
+ "project": "sockshop",
+ "stage": "production",
+ "service": "carts",
+ "problem": {
+ "problemTitle": "response_time_p90",
+ "rootCause": "Response time degradation"
+ }
+ }
+ }
+ ```
+
+1. Check the executed remediation actions by executing:
+
+
+
+
+ ```
+ kubectl get deployments -n sockshop-production
+ ```
+
+ You can see that the `carts-primary` deployment is now served by two pods:
+
+ ```
+ NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
+ carts-db 1 1 1 1 37m
+ carts-primary 2 2 2 2 32m
+ ```
+
+1. Also you should see an additional pod running when you execute:
+
+
+ ```
+ kubectl get pods -n sockshop-production
+ ```
+
+ ```
+ NAME READY STATUS RESTARTS AGE
+ carts-db-57cd95557b-r6cg8 1/1 Running 0 38m
+ carts-primary-7c96d87df9-75pg7 2/2 Running 0 33m
+ carts-primary-7c96d87df9-78fh2 2/2 Running 0 5m
+ ```
+
+1. To get an overview of the actions that got triggered by the response time SLO violation, you can use the Keptn's Bridge.
+
+ In this example, the bridge shows that the remediation service triggered an update of the configuration of the carts service by increasing the number of replicas to 2. When the additional replica was available, the wait-service waited for ten minutes for the remediation action to take effect. Afterwards, an evaluation by the lighthouse-service was triggered to check if the remediation action resolved the problem. In this case, increasing the number of replicas achieved the desired effect, since the evaluation of the service level objectives has been successful.
+
+ 
+ 
+
+1. Furthermore, you can use Prometheus to double-check the response time:
+
+ 
+
+1. Also, the Prometheus Alert Manager will show zero active alerts.
+
+ 
diff --git a/site/tutorials/snippets/11/simplenode/monitorDeployLoadSimplenode.md b/site/tutorials/snippets/11/simplenode/monitorDeployLoadSimplenode.md
new file mode 100644
index 00000000..7cbf49dd
--- /dev/null
+++ b/site/tutorials/snippets/11/simplenode/monitorDeployLoadSimplenode.md
@@ -0,0 +1,39 @@
+
+## Monitor, Deploy and Run Load against Simplenode
+Duration: 5:00
+
+In our tutorial we are not using Keptn for deploying our application. The goal is to show you that Keptn can also be used for Quality Gates or even Performance Evaluation on applications deployed manually or through other deployment automation tools.
+
+We do however need an application that is monitored by Dynatrace in order for Keptn to pull out metrics during the evaluation.
+
+In case you do not have your own application and the tutorial offered you to deploy this simple node.js-based sample application then follow these steps to deploy it and get it monitored by Dynatrace.
+
+### Host monitored with Dynatrace
+
+Before we deploy our application let's make sure that your host is monitored with Dynatrace. To accomplish that, install the [Dynatrace OneAgent](https://www.dynatrace.com/support/help/setup-and-configuration/dynatrace-oneagent/) on the host where you will be deploying our application later on. One option is to stand up a virtual machine on your favorite cloud vendor and either add the installation instructions of the OneAgent to the startup script or execute it once the machine is running. The end goal should be that Dynatrace is monitoring that host:
+
+
+
+Another option would be to deploy that application on a Kubernetes cluster. In that case please follow the instructions to [deploy the Dynatrace OneAgent on Kubernetes](https://www.dynatrace.com/support/help/technology-support/cloud-platforms/kubernetes/)
+
+### Deploy the sample app
+
+Now as we have Dynatrace monitor your host we can deploy our container. Here is the example on how to deploy it using docker:
+```
+docker run -p 80:8080 grabnerandi/simplenodeservice:1.0.0
+```
+This will deploy Build 1.0.0 of the simplenodeservice sample application and expose it via port 80. If you have your firewalls set up correctly, you should be able to navigate to the website, and shortly after you should see the SimpleNodeJsService show up in Dynatrace:
+
+
+
+### Put some load on it
+
+The last thing we need is some consistent load on the service so that, later on, when we ask Keptn to evaluate SLIs against our SLOs, Dynatrace has data for that time period.
+The examples repository from Git that you have cloned locally contains two options
+1: Running JMeter
+If you happen to have JMeter installed you can run the load.jmx script (from examples/simplenodeservice/keptn/jmeter). Before you do please change the SERVER_URL variable to the URL of your hosted version of the simplenode application. Additionally you should also change the ThreadGroups Loop Count setting to forever so that the test keeps running!
+
+2: Run a batch with CURL
+Another option is to launch the gen_load.sh file you will find under examples/simplenodeservice/helpers. This script uses curl to execute some simple GET requests. It will run until you either kill the process or until the script finds the file endloadtest.txt (which can be empty).
+
+To validate if load comes through navigate to the service in Dynatrace and validate that you see performance & trace data for your monitored service!
\ No newline at end of file
diff --git a/site/tutorials/tutorial-template.md b/site/tutorials/tutorial-template.md
index 02d93806..24c66496 100644
--- a/site/tutorials/tutorial-template.md
+++ b/site/tutorials/tutorial-template.md
@@ -1,7 +1,7 @@
summary: My first tutorial
id: my-first-tutorial-unique-id
categories: prometheus,gke,quality-gates
-tags: keptn07x
+tags: keptn11x
status: Draft
authors: JΓΌrgen Etzlstorfer
Feedback Link: https://github.com/keptn/tutorials/tree/master/site/tutorials