From 52e095b259ad7dacf841fdfa5025541f0566ac1c Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Tue, 15 Apr 2025 12:00:33 +0200
Subject: [PATCH 1/3] Adding Integration Source doc

Signed-off-by: Matthias Wessendorf
---
 modules/ROOT/nav.adoc | 6 ++
 .../integrationsource/aws_ddbstreams.adoc | 57 +++++++++++++++++++
 .../eventing/integrationsource/aws_s3.adoc | 57 +++++++++++++++++++
 .../eventing/integrationsource/aws_sqs.adoc | 57 +++++++++++++++++++
 .../eventing/integrationsource/overview.adoc | 24 ++++++++
 .../eventing/integrationsource/timer.adoc | 43 ++++++++++++++
 6 files changed, 244 insertions(+)
 create mode 100644 modules/serverless/pages/eventing/integrationsource/aws_ddbstreams.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsource/aws_s3.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsource/aws_sqs.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsource/overview.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsource/timer.adoc

diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index ad07d057..17835119 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -10,6 +10,12 @@
 *** xref:serverless:eventing/kafka-scaling-setup.adoc[Setup Autoscaling For Eventing Kafka Components]
 *** xref:serverless:eventing/backstage-setup.adoc[Setup Backstage for Eventing]
 *** xref:serverless:eventing/backstage-usage.adoc[Knative Event Mesh Backstage Plugin]
+*** IntegrationSource
+**** xref:serverless:eventing/integrationsource/overview.adoc[Overview]
+**** xref:serverless:eventing/integrationsource/aws_ddbstreams.adoc[AWS DynamoDB Streams]
+**** xref:serverless:eventing/integrationsource/aws_s3.adoc[AWS S3 Source]
+**** xref:serverless:eventing/integrationsource/aws_sqs.adoc[AWS SQS Source]
+**** xref:serverless:eventing/integrationsource/timer.adoc[Timer Source]
 ** Serving
 *** xref:serverless:serving/serving-with-ingress-sharding.adoc[Use Serving with OpenShift ingress sharding]
 *** xref:serverless:serving/scaleability-and-performance-of-serving.adoc[Scalability and performance of {serverlessproductname} Serving]
diff --git a/modules/serverless/pages/eventing/integrationsource/aws_ddbstreams.adoc b/modules/serverless/pages/eventing/integrationsource/aws_ddbstreams.adoc
new file mode 100644
index 00000000..64034f26
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsource/aws_ddbstreams.adoc
@@ -0,0 +1,57 @@
= AWS DynamoDB Streams
:compat-mode!:
// Metadata:
:description: AWS DynamoDB Streams in {serverlessproductname}

This page describes how to use AWS DynamoDB Streams with the `IntegrationSource` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSource` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Amazon credentials

To connect to AWS, the `IntegrationSource` uses a Kubernetes `Secret` that is present in the namespace of the resource.
The `Secret` can be created as follows:

[source,terminal]
----
$ oc -n <namespace> create secret generic my-secret --from-literal=aws.accessKey=<accessKey> --from-literal=aws.secretKey=<secretKey>
----

== AWS DynamoDB Streams Example

Below is an `IntegrationSource` to receive events from Amazon DynamoDB Streams.

[source,yaml]
----
apiVersion: sources.knative.dev/v1alpha1
kind: IntegrationSource
metadata:
  name: integration-source-aws-ddb
  namespace: knative-samples
spec:
  aws:
    ddbStreams:
      table: "my-table"
      region: "eu-north-1"
    auth:
      secret:
        ref:
          name: "my-secret"
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1
      kind: Broker
      name: default
----

Inside the `aws.ddbStreams` object we define the name of the table and its region. The credentials for the AWS service are referenced from the `my-secret` Kubernetes `Secret`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/aws-ddb-streams-source.html[aws-ddb-streams-source].
diff --git a/modules/serverless/pages/eventing/integrationsource/aws_s3.adoc b/modules/serverless/pages/eventing/integrationsource/aws_s3.adoc
new file mode 100644
index 00000000..36ed2484
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsource/aws_s3.adoc
@@ -0,0 +1,57 @@
= AWS S3 Source
:compat-mode!:
// Metadata:
:description: AWS S3 Source in {serverlessproductname}

This page describes how to use the AWS S3 service with the `IntegrationSource` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSource` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Amazon credentials

To connect to AWS, the `IntegrationSource` uses a Kubernetes `Secret` that is present in the namespace of the resource. The `Secret` can be created as follows:

[source,terminal]
----
$ oc -n <namespace> create secret generic my-secret --from-literal=aws.accessKey=<accessKey> --from-literal=aws.secretKey=<secretKey>
----

== AWS S3 Source Example

Below is an `IntegrationSource` to receive data from an Amazon S3 bucket.

[source,yaml]
----
apiVersion: sources.knative.dev/v1alpha1
kind: IntegrationSource
metadata:
  name: integration-source-aws-s3
  namespace: knative-samples
spec:
  aws:
    s3:
      arn: "arn:aws:s3:::my-bucket"
      region: "eu-north-1"
    auth:
      secret:
        ref:
          name: "my-secret"
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1
      kind: Broker
      name: default
----

Inside the `aws.s3` object we define the bucket by its name (or _arn_) and its region. The credentials for the AWS service are referenced from the `my-secret` Kubernetes `Secret`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/aws-s3-source.html[aws-s3-source].
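
For reference, the `oc create secret` command shown above is equivalent to applying a manifest like the following sketch. The `aws.accessKey` and `aws.secretKey` key names are the ones the source references; the namespace and credential values are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: my-secret
  namespace: knative-samples # must be the namespace of the IntegrationSource
type: Opaque
stringData: # stringData accepts plain values, avoiding manual base64 encoding
  aws.accessKey: <accessKey>
  aws.secretKey: <secretKey>
----
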
diff --git a/modules/serverless/pages/eventing/integrationsource/aws_sqs.adoc b/modules/serverless/pages/eventing/integrationsource/aws_sqs.adoc
new file mode 100644
index 00000000..b9b28d63
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsource/aws_sqs.adoc
@@ -0,0 +1,57 @@
= AWS Simple Queue Service Source
:compat-mode!:
// Metadata:
:description: AWS Simple Queue Service Source in {serverlessproductname}

This page describes how to use the Amazon Web Services (AWS) Simple Queue Service (SQS) with the `IntegrationSource` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSource` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Amazon credentials

To connect to AWS, the `IntegrationSource` uses a Kubernetes `Secret` that is present in the namespace of the resource. The `Secret` can be created as follows:

[source,terminal]
----
$ oc -n <namespace> create secret generic my-secret --from-literal=aws.accessKey=<accessKey> --from-literal=aws.secretKey=<secretKey>
----

== AWS SQS Source Example

Below is an `IntegrationSource` to receive data from AWS SQS.

[source,yaml]
----
apiVersion: sources.knative.dev/v1alpha1
kind: IntegrationSource
metadata:
  name: integration-source-aws-sqs
  namespace: knative-samples
spec:
  aws:
    sqs:
      arn: "arn:aws:sqs:eu-north-1:123456789012:my-queue"
      region: "eu-north-1"
    auth:
      secret:
        ref:
          name: "my-secret"
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1
      kind: Broker
      name: default
----

Inside the `aws.sqs` object we define the queue by its name (or _arn_) and its region. The credentials for the AWS service are referenced from the `my-secret` Kubernetes `Secret`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/aws-sqs-source.html[aws-sqs-source].
\ No newline at end of file
diff --git a/modules/serverless/pages/eventing/integrationsource/overview.adoc b/modules/serverless/pages/eventing/integrationsource/overview.adoc
new file mode 100644
index 00000000..0dc1d289
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsource/overview.adoc
@@ -0,0 +1,24 @@
= Knative Integration Source
:compat-mode!:
// Metadata:
:description: Knative Integration Source in {serverlessproductname}

This page describes how to use the new `IntegrationSource` API for Eventing in {serverlessproductname}. The `IntegrationSource` is a Knative Eventing custom resource supporting selected https://camel.apache.org/camel-k/latest/kamelets/kamelets.html[Kamelets] from the https://camel.apache.org/[Apache Camel] project. Kamelets allow users to connect to third-party systems for improved connectivity; they can act as "sources" or as "sinks". The `IntegrationSource` therefore allows you to consume data from external systems and forward it into Knative Eventing.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSource` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Supported Kamelet sources

* xref:./aws_ddbstreams.adoc[AWS DDB Streams]
* xref:./aws_s3.adoc[AWS S3]
* xref:./aws_sqs.adoc[AWS SQS]
* xref:./timer.adoc[Generic timer]
diff --git a/modules/serverless/pages/eventing/integrationsource/timer.adoc b/modules/serverless/pages/eventing/integrationsource/timer.adoc
new file mode 100644
index 00000000..5f9476cd
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsource/timer.adoc
@@ -0,0 +1,43 @@
= Timer Source
:compat-mode!:
// Metadata:
:description: Timer Source in {serverlessproductname}

This page describes how to use the _Timer Kamelet_ with the `IntegrationSource` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSource` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Timer Source Example

The Timer source produces periodic messages with a custom payload.

[source,yaml]
----
apiVersion: sources.knative.dev/v1alpha1
kind: IntegrationSource
metadata:
  name: integration-source-timer
  namespace: knative-samples
spec:
  timer:
    period: 2000
    message: "Hello, Eventing Core"
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1
      kind: Broker
      name: default
----

Inside the `timer` object we define the `period` (in milliseconds) and the message that is sent to the referenced `sink`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/timer-source.html[timer-source].
\ No newline at end of file
From 91a35d507f9f2aa2b564eed8d196cc843050b8be Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Tue, 15 Apr 2025 13:04:27 +0200
Subject: [PATCH 2/3] Adding Integration Sink doc

Signed-off-by: Matthias Wessendorf
---
 modules/ROOT/nav.adoc | 6 +++
 .../eventing/integrationsink/aws_s3.adoc | 52 +++++++++++++++++++
 .../eventing/integrationsink/aws_sns.adoc | 52 +++++++++++++++++++
 .../eventing/integrationsink/aws_sqs.adoc | 52 +++++++++++++++++++
 .../eventing/integrationsink/logger.adoc | 38 ++++++++++++++
 .../eventing/integrationsink/overview.adoc | 24 +++++++++
 6 files changed, 224 insertions(+)
 create mode 100644 modules/serverless/pages/eventing/integrationsink/aws_s3.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsink/aws_sns.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsink/aws_sqs.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsink/logger.adoc
 create mode 100644 modules/serverless/pages/eventing/integrationsink/overview.adoc

diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 17835119..2732330d 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -16,6 +16,12 @@
 **** xref:serverless:eventing/integrationsource/aws_s3.adoc[AWS S3 Source]
 **** xref:serverless:eventing/integrationsource/aws_sqs.adoc[AWS SQS Source]
 **** xref:serverless:eventing/integrationsource/timer.adoc[Timer Source]
+*** IntegrationSink
+**** xref:serverless:eventing/integrationsink/overview.adoc[Overview]
+**** xref:serverless:eventing/integrationsink/aws_s3.adoc[AWS S3 Sink]
+**** xref:serverless:eventing/integrationsink/aws_sns.adoc[AWS SNS Sink]
+**** xref:serverless:eventing/integrationsink/aws_sqs.adoc[AWS SQS Sink]
+**** xref:serverless:eventing/integrationsink/logger.adoc[Logger Sink]
 ** Serving
 *** xref:serverless:serving/serving-with-ingress-sharding.adoc[Use Serving with OpenShift ingress sharding]
 *** xref:serverless:serving/scaleability-and-performance-of-serving.adoc[Scalability and performance of {serverlessproductname} Serving]
diff --git a/modules/serverless/pages/eventing/integrationsink/aws_s3.adoc b/modules/serverless/pages/eventing/integrationsink/aws_s3.adoc
new file mode 100644
index 00000000..32a1b24c
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsink/aws_s3.adoc
@@ -0,0 +1,52 @@
= AWS S3 Sink
:compat-mode!:
// Metadata:
:description: AWS S3 Sink in {serverlessproductname}

This page describes how to use the AWS S3 service with the `IntegrationSink` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSink` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Amazon credentials

To connect to AWS, the `IntegrationSink` uses a Kubernetes `Secret` that is present in the namespace of the resource.
The `Secret` can be created as follows:

[source,terminal]
----
$ oc -n <namespace> create secret generic my-secret --from-literal=aws.accessKey=<accessKey> --from-literal=aws.secretKey=<secretKey>
----

== AWS S3 Sink Example

Below is an `IntegrationSink` to send data to an Amazon S3 bucket:

[source,yaml]
----
apiVersion: sinks.knative.dev/v1alpha1
kind: IntegrationSink
metadata:
  name: integration-sink-aws-s3
  namespace: knative-samples
spec:
  aws:
    s3:
      arn: "arn:aws:s3:::my-bucket"
      region: "eu-north-1"
    auth:
      secret:
        ref:
          name: "my-secret"
----

Inside the `aws.s3` object we define the bucket by its name (or _arn_) and its region. The credentials for the AWS service are referenced from the `my-secret` Kubernetes `Secret`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/aws-s3-sink.html[aws-s3-sink].
diff --git a/modules/serverless/pages/eventing/integrationsink/aws_sns.adoc b/modules/serverless/pages/eventing/integrationsink/aws_sns.adoc
new file mode 100644
index 00000000..ab9f6284
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsink/aws_sns.adoc
@@ -0,0 +1,52 @@
= AWS Simple Notification Service Sink
:compat-mode!:
// Metadata:
:description: AWS Simple Notification Service Sink in {serverlessproductname}

This page describes how to use the Amazon Web Services (AWS) Simple Notification Service (SNS) with the `IntegrationSink` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSink` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Amazon credentials

To connect to AWS, the `IntegrationSink` uses a Kubernetes `Secret` that is present in the namespace of the resource. The `Secret` can be created as follows:

[source,terminal]
----
$ oc -n <namespace> create secret generic my-secret --from-literal=aws.accessKey=<accessKey> --from-literal=aws.secretKey=<secretKey>
----

== AWS SNS Sink Example

Below is an `IntegrationSink` to send data to AWS SNS:

[source,yaml]
----
apiVersion: sinks.knative.dev/v1alpha1
kind: IntegrationSink
metadata:
  name: integration-sink-aws-sns
  namespace: knative-samples
spec:
  aws:
    sns:
      arn: "my-topic"
      region: "eu-north-1"
    auth:
      secret:
        ref:
          name: "my-secret"
----

Inside the `aws.sns` object we define the topic by its name (or _arn_) and its region. The credentials for the AWS service are referenced from the `my-secret` Kubernetes `Secret`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/aws-sns-sink.html[aws-sns-sink].
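
Because the `IntegrationSink` is an addressable resource, it can, for example, be referenced as the subscriber of a `Trigger` to route selected events from a `Broker` to SNS. The following is a sketch that assumes the `integration-sink-aws-sns` resource above; the event `type` filter and the `Broker` name are placeholders:

[source,yaml]
----
apiVersion: eventing.knative.dev/v1
kind: Trigger
metadata:
  name: sns-trigger
  namespace: knative-samples
spec:
  broker: default
  filter:
    attributes:
      type: com.example.notification # only forward this event type
  subscriber:
    ref: # reference the IntegrationSink defined above
      apiVersion: sinks.knative.dev/v1alpha1
      kind: IntegrationSink
      name: integration-sink-aws-sns
----
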
diff --git a/modules/serverless/pages/eventing/integrationsink/aws_sqs.adoc b/modules/serverless/pages/eventing/integrationsink/aws_sqs.adoc
new file mode 100644
index 00000000..21d97478
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsink/aws_sqs.adoc
@@ -0,0 +1,52 @@
= AWS Simple Queue Service Sink
:compat-mode!:
// Metadata:
:description: AWS Simple Queue Service Sink in {serverlessproductname}

This page describes how to use the Amazon Web Services (AWS) Simple Queue Service (SQS) with the `IntegrationSink` API for Eventing in {serverlessproductname}.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSink` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Amazon credentials

To connect to AWS, the `IntegrationSink` uses a Kubernetes `Secret` that is present in the namespace of the resource. The `Secret` can be created as follows:

[source,terminal]
----
$ oc -n <namespace> create secret generic my-secret --from-literal=aws.accessKey=<accessKey> --from-literal=aws.secretKey=<secretKey>
----

== AWS SQS Sink Example

Below is an `IntegrationSink` to send data to AWS SQS:

[source,yaml]
----
apiVersion: sinks.knative.dev/v1alpha1
kind: IntegrationSink
metadata:
  name: integration-sink-aws-sqs
  namespace: knative-samples
spec:
  aws:
    sqs:
      arn: "arn:aws:sqs:eu-north-1:123456789012:my-queue"
      region: "eu-north-1"
    auth:
      secret:
        ref:
          name: "my-secret"
----

Inside the `aws.sqs` object we define the queue by its name (or _arn_) and its region. The credentials for the AWS service are referenced from the `my-secret` Kubernetes `Secret`.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/aws-sqs-sink.html[aws-sqs-sink].
diff --git a/modules/serverless/pages/eventing/integrationsink/logger.adoc b/modules/serverless/pages/eventing/integrationsink/logger.adoc
new file mode 100644
index 00000000..a96aef2f
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsink/logger.adoc
@@ -0,0 +1,38 @@
= Log Sink
:compat-mode!:
// Metadata:
:description: Log Sink in {serverlessproductname}

This page describes how to use the _Log Sink Kamelet_ with the `IntegrationSink` API for Eventing in {serverlessproductname}. This sink is useful for debugging purposes.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSink` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Log Sink Example

Below is an `IntegrationSink` that logs all data that it receives:

[source,yaml]
----
apiVersion: sinks.knative.dev/v1alpha1
kind: IntegrationSink
metadata:
  name: integration-log-sink
  namespace: knative-samples
spec:
  log:
    showHeaders: true
    level: INFO
----

Inside the `log` object we set the logging `level` and configure the sink to also show the (HTTP) headers of the requests it receives.

For more details, see the Apache Camel Kamelet https://camel.apache.org/camel-kamelets/latest/log-sink.html[log-sink].
diff --git a/modules/serverless/pages/eventing/integrationsink/overview.adoc b/modules/serverless/pages/eventing/integrationsink/overview.adoc
new file mode 100644
index 00000000..e701bc8f
--- /dev/null
+++ b/modules/serverless/pages/eventing/integrationsink/overview.adoc
@@ -0,0 +1,24 @@
= Knative Integration Sink
:compat-mode!:
// Metadata:
:description: Knative Integration Sink in {serverlessproductname}

This page describes how to use the new `IntegrationSink` API for Eventing in {serverlessproductname}. The `IntegrationSink` is a Knative Eventing custom resource supporting selected https://camel.apache.org/camel-k/latest/kamelets/kamelets.html[Kamelets] from the https://camel.apache.org/[Apache Camel] project. Kamelets allow users to connect to third-party systems for improved connectivity; they can act as "sources" or as "sinks". The `IntegrationSink` therefore allows you to send data from Knative Eventing to external systems in the form of CloudEvents.

[IMPORTANT]
====
{serverlessproductname} `IntegrationSink` is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Supported Kamelet sinks

* xref:./aws_s3.adoc[AWS S3]
* xref:./aws_sns.adoc[AWS SNS]
* xref:./aws_sqs.adoc[AWS SQS]
* xref:./logger.adoc[Generic logger]
From 4f12671cd59ecd177e1ebb8230fb3463e300ec4f Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Tue, 22 Apr 2025 17:28:07 +0200
Subject: [PATCH 3/3] Adding preview docs for Event Transform API

Signed-off-by: Matthias Wessendorf
---
 modules/ROOT/nav.adoc | 3 +
 .../eventing/eventtransform/jsonata.adoc | 436 ++++++++++++++++++
 .../eventing/eventtransform/overview.adoc | 290 ++++++++++++
 3 files changed, 729 insertions(+)
 create mode 100644 modules/serverless/pages/eventing/eventtransform/jsonata.adoc
 create mode 100644 modules/serverless/pages/eventing/eventtransform/overview.adoc

diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 2732330d..a0bd6789 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -10,6 +10,9 @@
 *** xref:serverless:eventing/kafka-scaling-setup.adoc[Setup Autoscaling For Eventing Kafka Components]
 *** xref:serverless:eventing/backstage-setup.adoc[Setup Backstage for Eventing]
 *** xref:serverless:eventing/backstage-usage.adoc[Knative Event Mesh Backstage Plugin]
+*** Event Transformations
+**** xref:serverless:eventing/eventtransform/overview.adoc[Overview]
+**** xref:serverless:eventing/eventtransform/jsonata.adoc[Event Transformations for JSON with JSONata]
 *** IntegrationSource
 **** xref:serverless:eventing/integrationsource/overview.adoc[Overview]
 **** xref:serverless:eventing/integrationsource/aws_ddbstreams.adoc[AWS DynamoDB Streams]
diff --git a/modules/serverless/pages/eventing/eventtransform/jsonata.adoc b/modules/serverless/pages/eventing/eventtransform/jsonata.adoc
new file mode 100644
index 00000000..e9f199d2
--- /dev/null
+++ b/modules/serverless/pages/eventing/eventtransform/jsonata.adoc
@@ -0,0 +1,436 @@
= Event Transformations for JSON with JSONata
:compat-mode!:
// Metadata:
:description: Event Transformations for JSON with JSONata in {serverlessproductname}

[IMPORTANT]
====
{serverlessproductname} Event Transformation is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Introduction to JSONata

https://jsonata.org/[JSONata] is a lightweight query and transformation language for JSON data.
In Knative EventTransform, JSONata expressions allow you to:

* Extract values from event data
* Promote data fields to CloudEvent attributes
* Restructure event payloads
* Add computed values
* Apply conditional logic

== Basic Usage

To use JSONata in an EventTransform resource, specify the expression in the `spec.jsonata.expression` field:

[source,yaml]
----
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: simple-transform
spec:
  jsonata:
    expression: |
      {
        "specversion": "1.0",
        "id": id,
        "time": time,
        "type": "transformed.type",
        "source": "transform.simple",
        "data": data
      }
----

== CloudEvent Structure

The input to the JSONata expression is the entire CloudEvent, including all its attributes and data. Your expression must produce a valid CloudEvent with at least these required fields:

* `specversion`: Should be set to "1.0"
* `id`: A unique identifier for the event
* `type`: The event type
* `source`: The event source
* `data`: The event payload

== Common Transformation Patterns

=== Preserving Original Event Structure

To preserve the original event structure while adding or modifying attributes:

[source,json]
----
{
  "specversion": "1.0",
  "id": id,
  "type": type,
  "source": source,
  "time": time,
  "data": data,
  "newattribute": "static value"
}
----

=== Extracting Fields as Attributes

To extract fields from the data and promote them to CloudEvent attributes:

[source,json]
----
{
  "specversion": "1.0",
  "id": id,
  "type": "user.event",
  "source": source,
  "time": time,
  "userid": data.user.id,
  "region": data.region,
  "data": $
}
----

The `$` symbol in JSONata represents the entire input object, so `"data": $` embeds the complete original event, attributes and data, as the new payload.

=== Restructuring Event Data

To completely reshape the event data:

[source,json]
----
{
  "specversion": "1.0",
  "id": order.id,
  "type": "order.transformed",
  "source": "transform.order-processor",
  "time": order.time,
  "orderid": order.id,
  "data": {
    "customer": {
      "id": order.user.id,
      "name": order.user.name
    },
    "items": order.items.{ "sku": sku, "quantity": qty, "price": price },
    "total": $sum(order.items.(price * qty))
  }
}
----

Given the transformation above, and this JSON object as input:

[source,json]
----
{
  "order": {
    "time": "2024-04-05T17:31:05Z",
    "id": "8a76992e-cbe2-4dbe-96c0-7a951077089d",
    "user": {
      "id": "bd9779ef-cba5-4ad0-b89b-e23913f0a7a7",
      "name": "John Doe"
    },
    "items": [
      {"sku": "KNATIVE-1", "price": 99.99, "qty": 1},
      {"sku": "KNATIVE-2", "price": 129.99, "qty": 2}
    ]
  }
}
----

It would produce:

[source,json]
----
{
  "specversion": "1.0",
  "id": "8a76992e-cbe2-4dbe-96c0-7a951077089d",
  "type": "order.transformed",
  "source": "transform.order-processor",
  "time": "2024-04-05T17:31:05Z",
  "orderid": "8a76992e-cbe2-4dbe-96c0-7a951077089d",
  "data": {
    "customer": {
      "id": "bd9779ef-cba5-4ad0-b89b-e23913f0a7a7",
      "name": "John Doe"
    },
    "items": [
      {
        "sku": "KNATIVE-1",
        "quantity": 1,
        "price": 99.99
      },
      {
        "sku": "KNATIVE-2",
        "quantity": 2,
        "price": 129.99
      }
    ],
    "total": 359.97
  }
}
----

=== Conditional Transformations

To apply different transformations based on conditions:

[source,json]
----
{
  "specversion": "1.0",
  "id": id,
  "type": type = "order.created" ? "new.order" : "updated.order",
  "source": source,
  "time": time,
  "priority": data.total > 1000 ? "high" : "normal",
"high" : "normal", + "data": $ +} +---- + +== Advanced JSONata Features + +=== Array Processing + +JSONata makes it easy to process arrays in your event data: + +[source,json] +---- +{ + "specversion": "1.0", + "id": id, + "type": "order.processed", + "source": source, + "time": $now(), + "itemcount": $count(order.items), + "multiorder": $count(order.items) > 1, + "data": { + "order": order.id, + "items": order.items[quantity > 1].{ + "product": name, + "quantity": quantity, + "lineTotal": price * quantity + }, + "totalvalue": $sum(order.items.(price * quantity)) + } +} +---- + +Given the transformation above, and this JSON object as input: + +[source,json] +---- +{ + "id": "12345", + "source": "https://example.com/orders", + "order": { + "id": "order-67890", + "items": [ + { + "name": "Laptop", + "price": 1000, + "quantity": 1 + }, + { + "name": "Mouse", + "price": 50, + "quantity": 2 + }, + { + "name": "Keyboard", + "price": 80, + "quantity": 3 + } + ] + } +} +---- + +It would produce: + +[source,json] +---- +{ + "specversion": "1.0", + "id": "12345", + "type": "order.processed", + "source": "https://example.com/orders", + "time": "2025-03-03T09:13:23.753Z", + "itemcount": 3, + "multiorder": true, + "data": { + "order": "order-67890", + "items": [ + { + "product": "Mouse", + "quantity": 2, + "lineTotal": 100 + }, + { + "product": "Keyboard", + "quantity": 3, + "lineTotal": 240 + } + ], + "totalvalue": 1340 + } +} +---- + +=== Using Built-in Functions + +JSONata provides many useful functions: + +[source,json] +---- +{ + "specversion": "1.0", + "id": id, + "type": "user.event", + "source": source, + "time": time, + "timestamp": $now(), + "username": $lowercase(data.user.name), + "initials": $join($map($split(data.user.name, " "), function($v) { $substring($v, 0, 1) }), ""), + "data": $ +} +---- + +== Transforming Replies + +When using the EventTransform with a sink, you can also transform the responses: + +[source,yaml] +---- +apiVersion: eventing.knative.dev/v1alpha1 +kind: EventTransform +metadata: + name: request-reply-transform +spec: + sink: + ref: + apiVersion: serving.knative.dev/v1 + kind: Service + name: processor-service + jsonata: + expression: | + # Request transformation + { + "specversion": "1.0", + "id": id, + "type": "request.transformed", + "source": source, + "time": time, + "data": data + } + reply: + jsonata: + expression: | + # Reply transformation + { + "specversion": "1.0", + "id": id, + "type": "reply.transformed", + "source": "transform.reply-processor", + "time": time, + "data": data + } +---- + +== Best Practices + +1. *Always produce valid CloudEvents*: Ensure your expressions include all required CloudEvent fields. + +2. *Test expressions thoroughly*: Use the https://try.jsonata.org/[JSONata Exerciser] to validate complex expressions. + +3. *Keep expressions readable*: Use line breaks and indentation in your YAML to make expressions easier to read and maintain. + +4. *Handle missing data*: Use the `?` operator to provide default values for potentially missing fields. + +5. *Avoid infinite loops*: When using the reply feature with a Broker, make sure to change the event type or add filters to prevent infinite loops. 
== Examples

=== User Registration Event Transformer

[source,yaml]
----
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: user-registration-transformer
spec:
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1
      kind: Broker
      name: default
  jsonata:
    expression: |
      {
        "specversion": "1.0",
        "id": id,
        "type": "user.registered.processed",
        "source": "transform.user-processor",
        "time": time,
        "userid": data.user.id,
        "region": data.region ? data.region : "unknown",
        "tier": data.subscription.tier ? data.subscription.tier : "free",
        "data": {
          "userId": data.user.id,
          "email": $lowercase(data.user.email),
          "displayName": data.user.name ? data.user.name : $substringBefore(data.user.email, "@"),
          "registrationDate": $now(),
          "subscription": data.subscription ? data.subscription : { "tier": "free" }
        }
      }
----

=== Order Processing Event Transformer

[source,yaml]
----
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: order-processor
spec:
  jsonata:
    expression: |
      {
        "specversion": "1.0",
        "id": id,
        "type": "order.processed",
        "source": "transform.order-processor",
        "time": time,
        "orderid": data.id,
        "customerid": data.customer.id,
        "region": data.region,
        "priority": $sum(data.items.(price * quantity)) > 1000 ? "high" : "standard",
        "data": {
          "orderId": data.id,
          "customer": data.customer,
          "items": data.items.{
            "productId": productId,
            "name": name,
            "quantity": quantity,
            "unitPrice": price,
            "totalPrice": price * quantity
          },
          "total": $sum(data.items.(price * quantity)),
          "tax": $sum(data.items.(price * quantity)) * 0.1,
          "grandTotal": $sum(data.items.(price * quantity)) * 1.1,
          "created": data.created,
          "processed": $now()
        }
      }
----

== Further Resources

* xref:./overview.adoc[EventTransform Overview and deployment patterns]
* https://jsonata.org/documentation.html[JSONata Documentation]
* https://try.jsonata.org/[JSONata Exerciser]
* https://github.com/cloudevents/spec[CloudEvents Specification]
\ No newline at end of file
diff --git a/modules/serverless/pages/eventing/eventtransform/overview.adoc b/modules/serverless/pages/eventing/eventtransform/overview.adoc
new file mode 100644
index 00000000..5b87eaac
--- /dev/null
+++ b/modules/serverless/pages/eventing/eventtransform/overview.adoc
@@ -0,0 +1,290 @@
= Event Transformation
:compat-mode!:
// Metadata:
:description: Event Transformation in {serverlessproductname}

[IMPORTANT]
====
{serverlessproductname} Event Transformation is a Developer Preview feature only.

Developer Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete.
Red Hat does not recommend using them in production.
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.

For more information about the support scope of Red Hat Developer Preview features, see https://access.redhat.com/support/offerings/devpreview/.
====

== Overview

`EventTransform` is a Knative API resource that enables declarative transformations of HTTP requests and responses
without requiring custom code. It allows you to modify event attributes, extract data from event payloads, and reshape
events to fit different systems' requirements.
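
For instance, a minimal transform that only renames the event type while passing everything else through can be declared like the following sketch, which uses the JSONata support described below; the resource name and the new type are placeholders:

[source,yaml]
----
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: retype-transform # placeholder name
spec:
  jsonata:
    expression: |
      {
        "specversion": "1.0",
        "id": id,
        "type": "retyped.event", /* only the type changes */
        "source": source,
        "time": time,
        "data": data
      }
----
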
+ +EventTransform is designed to be a flexible component in your event-driven architecture that can be placed at various +points in your event flow to facilitate seamless integration between diverse systems. + +== Key Features + +* *Declarative transformations* using standard Kubernetes resources +* *JSONata expressions* for powerful data extraction and transformation +* *Addressable resource* that can be referenced from any Knative source, trigger, or subscription +* *Flexible deployment options* within your event flow +* *Sink configuration* to direct transformed events to specific destinations +* *Reply support* to leverage Broker's built-in reply feature + +== Common Use Cases + +=== Field Extraction + +Extract specific fields from event payloads and promote them as CloudEvent attributes for filtering: + +[source,yaml] +---- +apiVersion: eventing.knative.dev/v1alpha1 +kind: EventTransform +metadata: + name: extract-user-id +spec: + jsonata: + expression: | + { + "specversion": "1.0", + "id": id, + "type": "user.extracted", + "source": "transform.user-extractor", + "time": time, + "userid": data.user.id, + "data": $ + } +---- + +=== Event Format Conversion + +Transform events from one format to another to ensure compatibility between systems: + +[source,yaml] +---- +apiVersion: eventing.knative.dev/v1alpha1 +kind: EventTransform +metadata: + name: format-converter +spec: + sink: + ref: + apiVersion: serving.knative.dev/v1 + kind: Service + name: destination-service + jsonata: + expression: | + { + "specversion": "1.0", + "id": id, + "type": "order.converted", + "source": "transform.format-converter", + "time": time, + "data": { + "orderId": data.id, + "customer": { + "name": data.user.fullName, + "email": data.user.email + }, + "items": data.items + } + } +---- + +=== Event Enrichment + +Add additional context or metadata to events: + +[source,yaml] +---- +apiVersion: eventing.knative.dev/v1alpha1 +kind: EventTransform +metadata: + name: event-enricher +spec: + jsonata: + expression: | + { + "specversion": "1.0", + "id": id, /* Add the "id", "type", "source", and "time" attributes based on the input JSON object fields */ + "type": type, + "source": source, + "time": time, + "environment": "production", /* Add fixed environment and region attributes to the event metadata */ + "region": "us-west-1", + "data": $ /* Add the event transform input JSON body as CloudEvent "data" field */ + } +---- + +=== Event Response Reply Transformation + +When using the EventTransform with a sink, you can also transform the responses from the sink: + +[IMPORTANT] +==== +The same type of transformation must be used for Sink and Reply transformations. 
====

[source,yaml]
----
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: request-reply-transform
spec:
  sink:
    ref:
      apiVersion: serving.knative.dev/v1
      kind: Service
      name: processor-service
  jsonata:
    expression: |
      /* Request transformation */
      {
        "specversion": "1.0",
        "id": id,
        "type": "request.transformed",
        "source": source,
        "time": time,
        "data": data
      }
  reply:
    jsonata:
      expression: |
        /* Reply transformation */
        {
          "specversion": "1.0",
          "id": id,
          "type": "reply.transformed",
          "source": "transform.reply-processor",
          "time": time,
          "data": data
        }
----

== Deployment Patterns

EventTransform can be used in different positions within your event flow:

=== Source → EventTransform → Broker

Transform events before they reach the Broker:

[source,yaml]
----
apiVersion: sources.knative.dev/v1
kind: ApiServerSource
metadata:
  name: k8s-events
spec:
  serviceAccountName: event-watcher
  resources:
    - apiVersion: v1
      kind: Event
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1alpha1
      kind: EventTransform
      name: event-transformer
---
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: event-transformer
spec:
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1
      kind: Broker
      name: default
  jsonata:
    expression: |
      /* transformation expression */
----

=== Broker → Trigger → EventTransform → Service or Sink

Transform events after they are filtered by a Trigger:

[source,yaml]
----
apiVersion: eventing.knative.dev/v1
kind: Trigger
metadata:
  name: transform-trigger
spec:
  broker: default
  filter:
    attributes:
      type: original.event.type
  subscriber:
    ref:
      apiVersion: eventing.knative.dev/v1alpha1
      kind: EventTransform
      name: event-transformer
---
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: event-transformer
spec:
  sink:
    ref:
      apiVersion: serving.knative.dev/v1
      kind: Service
      name: destination-service
  jsonata:
    expression: |
      /* transformation expression */
----

=== Using Broker Reply Feature

Transform events and republish them back to the Broker:

[IMPORTANT]
====
Preventing infinite event loops: When using the reply feature with a Broker, you must ensure that your transformed
events don't trigger the same Trigger that sent them to the EventTransform in the first place.
====

[source,yaml]
----
apiVersion: eventing.knative.dev/v1
kind: Trigger
metadata:
  name: transform-trigger
spec:
  broker: default
  filter:
    attributes:
      type: original.event.type
  subscriber:
    ref:
      apiVersion: eventing.knative.dev/v1alpha1
      kind: EventTransform
      name: event-transformer
---
apiVersion: eventing.knative.dev/v1alpha1
kind: EventTransform
metadata:
  name: event-transformer
spec:
  # No sink specified - will use reply feature
  jsonata:
    expression: |
      {
        "specversion": "1.0",
        "id": id,
        "time": time,
        "type": "transformed.event.type",
        "source": "transform.event-transformer",
        "data": $
      }
----

== Next Steps

* xref:./jsonata.adoc[JSONata Transformations] - Learn about using JSONata expressions for event transformations