From d9d38587a366754d82658946a6cd66110588288b Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 31 Jul 2023 13:30:12 +0100 Subject: [PATCH 01/90] Create dependency-graph.yml (#204) --- .github/workflows/dependency-graph.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/dependency-graph.yml diff --git a/.github/workflows/dependency-graph.yml b/.github/workflows/dependency-graph.yml new file mode 100644 index 000000000..f8facc045 --- /dev/null +++ b/.github/workflows/dependency-graph.yml @@ -0,0 +1,12 @@ +name: Update Dependency Graph +on: + push: + branches: + - main # default branch of the project +jobs: + dependency-graph: + name: Update Dependency Graph + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: scalacenter/sbt-dependency-submission@v2 From b9fad2d7834600b18d3e7b9597a6e9f0b05e67c0 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Mon, 31 Jul 2023 09:59:39 +0200 Subject: [PATCH 02/90] Add commands to apply and check codestyle --- .github/workflows/check-build-test.yml | 4 ++-- CONTRIBUTING.md | 6 ++++++ build.sbt | 13 +++++++------ 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/check-build-test.yml b/.github/workflows/check-build-test.yml index 14d238292..52c5fd767 100644 --- a/.github/workflows/check-build-test.yml +++ b/.github/workflows/check-build-test.yml @@ -41,8 +41,8 @@ jobs: uses: coursier/cache-action@v6.4.0 # temporarily remove mima checks - - name: "Code style, compile tests, MiMa. Run locally with: sbt +~2.13 \"verifyCodeFmt; Test/compile; mimaReportBinaryIssues\"" - run: sbt "verifyCodeFmt; +Test/compile" + - name: "Code style, compile tests, MiMa. Run locally with: sbt +~2.13 \"javafmtCheckAll; Test/compile; mimaReportBinaryIssues\"" + run: sbt "javafmtCheckAll; +Test/compile" documentation: name: ScalaDoc, Documentation with Paradox diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7c0476df4..bdbdd52d6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -81,6 +81,12 @@ Example: * Details 2 * Details 3 +## Applying code style to the project + +The project uses [scalafmt](https://scalameta.org/scalafmt/) to ensure code quality which is automatically checked on +every PR. If you would like to check for any potential code style problems locally you can run `sbt checkCodeStyle` +and if you want to apply the code style then you can run `sbt applyCodeStyle`. + ### Ignoring formatting commits in git blame Throughout the history of the codebase various formatting commits have been applied as the scalafmt style has evolved over time, if desired diff --git a/build.sbt b/build.sbt index a9969e7a7..47176e169 100644 --- a/build.sbt +++ b/build.sbt @@ -98,6 +98,11 @@ lazy val `pekko-connectors` = project | | mimaReportBinaryIssues - checks whether this current API | is binary compatible with the released version + | + | checkCodeStyle - checks that the codebase follows code + | style + | + | applyCodeStyle - applies code style to the codebase """.stripMargin, // unidoc combines sources and jars from all connectors and that // might include some incompatible ones. Depending on the @@ -124,12 +129,8 @@ lazy val `pekko-connectors` = project crossScalaVersions := List() // workaround for https://github.com/sbt/sbt/issues/3465 ) -TaskKey[Unit]("verifyCodeFmt") := { - javafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ => - throw new MessageOnlyException( - "Unformatted Java code found. 
Please run 'javafmtAll' and commit the reformatted code") - } -} +addCommandAlias("applyCodeStyle", ";scalafmtAll; scalafmtSbt; javafmtAll") +addCommandAlias("checkCodeStyle", ";scalafmtCheckAll; scalafmtSbtCheck; javafmtCheckAll") lazy val amqp = pekkoConnectorProject("amqp", "amqp", Dependencies.Amqp) From 66fdd04a725af759bb3ca98b58f57dc6ede4047d Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 28 Jul 2023 16:47:47 +0200 Subject: [PATCH 03/90] Add sbt-license-report --- build.sbt | 9 +- docs/src/main/paradox/overview.md | 1 + project/Dependencies.scala | 230 ++++++++++++------------------ project/LicenseReport.scala | 67 +++++++++ project/plugins.sbt | 1 + 5 files changed, 170 insertions(+), 138 deletions(-) create mode 100644 project/LicenseReport.scala diff --git a/build.sbt b/build.sbt index 47176e169..16ae3b173 100644 --- a/build.sbt +++ b/build.sbt @@ -418,7 +418,14 @@ lazy val docs = project "examples/jms-samples.html", "examples/mqtt-samples.html", "index.html"), - apidocRootPackage := "org.apache.pekko") + apidocRootPackage := "org.apache.pekko", + Compile / paradoxMarkdownToHtml / sourceGenerators += Def.taskDyn { + val targetFile = (Compile / paradox / sourceManaged).value / "license-report.md" + + (LocalRootProject / dumpLicenseReportAggregate).map { dir => + IO.copy(List(dir / "pekko-connectors-root-licenses.md" -> targetFile)).toList + } + }.taskValue) lazy val testkit = internalProject("testkit", Dependencies.testkit) diff --git a/docs/src/main/paradox/overview.md b/docs/src/main/paradox/overview.md index 94af30642..ea2d13cf3 100644 --- a/docs/src/main/paradox/overview.md +++ b/docs/src/main/paradox/overview.md @@ -45,5 +45,6 @@ We want Apache Pekko and Apache Pekko Connectors to strive in a welcoming and op * [Other documentation resources](other-docs/index.md) * [Integration Patterns](patterns.md) * [Release notes](release-notes/index.md) +* [License Report](license-report.md) @@@ diff --git a/project/Dependencies.scala b/project/Dependencies.scala index c3d1b0a1f..0e739f797 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -66,12 +66,11 @@ object Dependencies { "org.apache.pekko" %% "pekko-stream" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion, "org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, - "ch.qos.logback" % "logback-classic" % "1.2.11", // Eclipse Public License 1.0 + "ch.qos.logback" % "logback-classic" % "1.2.11", "org.scalatest" %% "scalatest" % ScalaTestVersion, "com.dimafeng" %% "testcontainers-scala-scalatest" % TestContainersScalaTestVersion, - "com.novocode" % "junit-interface" % "0.11", // BSD-style - "junit" % "junit" % "4.13" // Eclipse Public License 1.0 - )) + "com.novocode" % "junit-interface" % "0.11", + "junit" % "junit" % "4.13")) val Mockito = Seq( "org.mockito" % "mockito-core" % mockitoVersion % Test, @@ -89,24 +88,20 @@ object Dependencies { val Amqp = Seq( libraryDependencies ++= Seq( - "com.rabbitmq" % "amqp-client" % "5.14.2" // APLv2 - ) ++ Mockito) + "com.rabbitmq" % "amqp-client" % "5.14.2") ++ Mockito) val AwsLambda = Seq( libraryDependencies ++= Seq( - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, // ApacheV2 - ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( // ApacheV2 - + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, + ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( ExclusionRule(organization = "org.apache.pekko")), - ("software.amazon.awssdk" % "lambda" % 
AwsSdk2Version).excludeAll( // ApacheV2 - + ("software.amazon.awssdk" % "lambda" % AwsSdk2Version).excludeAll( ExclusionRule("software.amazon.awssdk", "apache-client"), ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) val AzureStorageQueue = Seq( libraryDependencies ++= Seq( - "com.microsoft.azure" % "azure-storage" % "8.0.0" // ApacheV2 - )) + "com.microsoft.azure" % "azure-storage" % "8.0.0")) val CassandraVersionInDocs = "4.0" val CassandraDriverVersion = "4.15.0" @@ -122,33 +117,28 @@ object Dependencies { val Couchbase = Seq( libraryDependencies ++= Seq( - "com.couchbase.client" % "java-client" % CouchbaseVersion, // ApacheV2 - "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", // ApacheV2 - "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided, // Apache V2 - "com.typesafe.play" %% "play-json" % "2.9.2" % Test, // Apache V2 - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion % Test // Apache V2 - )) + "com.couchbase.client" % "java-client" % CouchbaseVersion, + "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", + "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided, + "com.typesafe.play" %% "play-json" % "2.9.2" % Test, + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion % Test)) val `Doc-examples` = Seq( libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion % Test, "org.apache.pekko" %% "pekko-connectors-kafka" % "0.0.0+1761-2291eac2-SNAPSHOT" % Test, - "junit" % "junit" % "4.13.2" % Test, // Eclipse Public License 1.0 - "org.scalatest" %% "scalatest" % ScalaTestVersion % Test // ApacheV2 - )) + "junit" % "junit" % "4.13.2" % Test, + "org.scalatest" %% "scalatest" % ScalaTestVersion % Test)) val DynamoDB = Seq( libraryDependencies ++= Seq( - ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( // ApacheV2 - + ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( ExclusionRule(organization = "org.apache.pekko")), - ("software.amazon.awssdk" % "dynamodb" % AwsSdk2Version).excludeAll( // ApacheV2 - + ("software.amazon.awssdk" % "dynamodb" % AwsSdk2Version).excludeAll( ExclusionRule("software.amazon.awssdk", "apache-client"), ExclusionRule("software.amazon.awssdk", "netty-nio-client")), - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion // ApacheV2 - )) + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion)) val Elasticsearch = Seq( libraryDependencies ++= Seq( @@ -158,25 +148,22 @@ object Dependencies { val File = Seq( libraryDependencies ++= Seq( - "com.google.jimfs" % "jimfs" % "1.2" % Test // ApacheV2 - )) + "com.google.jimfs" % "jimfs" % "1.2" % Test)) val AvroParquet = Seq( libraryDependencies ++= Seq( - "org.apache.parquet" % "parquet-avro" % "1.10.1", // Apache2 - ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 - ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 + "org.apache.parquet" % "parquet-avro" % "1.10.1", + ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), + ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), "com.sksamuel.avro4s" %% "avro4s-core" % "4.1.1" % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, - "org.specs2" %% "specs2-core" % "4.8.3" % Test, // MIT like: https://github.com/etorreborre/specs2/blob/master/LICENSE.txt - "org.slf4j" % "log4j-over-slf4j" % 
log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html - )) + "org.specs2" %% "specs2-core" % "4.8.3" % Test, + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val Ftp = Seq( libraryDependencies ++= Seq( - "commons-net" % "commons-net" % "3.8.0", // ApacheV2 - "com.hierynomus" % "sshj" % "0.33.0" // ApacheV2 - )) + "commons-net" % "commons-net" % "3.8.0", + "com.hierynomus" % "sshj" % "0.33.0")) val GeodeVersion = "1.15.0" val GeodeVersionForDocs = "115" @@ -193,10 +180,9 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.jwt-scala" %% "jwt-spray-json" % "7.1.4", // ApacheV2 - "com.google.auth" % "google-auth-library-credentials" % "0.24.1", // BSD 3-clause - "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 - ) ++ Mockito) + "com.github.jwt-scala" %% "jwt-spray-json" % "7.1.4", + "com.google.auth" % "google-auth-library-credentials" % "0.24.1", + "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val GoogleBigQuery = Seq( libraryDependencies ++= Seq( @@ -206,16 +192,15 @@ object Dependencies { "io.spray" %% "spray-json" % "1.3.6", "com.fasterxml.jackson.core" % "jackson-annotations" % JacksonDatabindVersion, "com.fasterxml.jackson.datatype" % "jackson-datatype-jsr310" % JacksonDatabindVersion % Test, - "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 - ) ++ Mockito) + "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val GoogleBigQueryStorage = Seq( // see Pekko gRPC version in plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-bigquerystorage/tree/master/proto-google-cloud-bigquerystorage-v1 - "com.google.api.grpc" % "proto-google-cloud-bigquerystorage-v1" % "1.22.0" % "protobuf-src", // ApacheV2 + "com.google.api.grpc" % "proto-google-cloud-bigquerystorage-v1" % "1.22.0" % "protobuf-src", "org.apache.avro" % "avro" % "1.9.2" % "provided", "org.apache.arrow" % "arrow-vector" % "4.0.0" % "provided", - "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, // ApacheV2 + "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-core" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, @@ -227,16 +212,15 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.tomakehurst" % "wiremock" % "2.27.2" % Test // ApacheV2 - ) ++ Mockito) + "com.github.tomakehurst" % "wiremock" % "2.27.2" % Test) ++ Mockito) val GooglePubSubGrpc = Seq( // see Pekko gRPC version in plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-pubsub/tree/master/proto-google-cloud-pubsub-v1/ - "com.google.cloud" % "google-cloud-pubsub" % "1.112.5" % "protobuf-src", // ApacheV2 - "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, // ApacheV2 - "com.google.auth" % "google-auth-library-oauth2-http" % "0.22.2", // BSD 3-clause + "com.google.cloud" % "google-cloud-pubsub" % "1.112.5" % "protobuf-src", + "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, + "com.google.auth" % "google-auth-library-oauth2-http" % "0.22.2", // pull in Pekko Discovery for our Pekko version "org.apache.pekko" %% 
"pekko-discovery" % PekkoVersion)) @@ -249,8 +233,7 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 - ) ++ Mockito) + "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val HBase = { val hbaseVersion = "1.4.13" @@ -258,77 +241,69 @@ object Dependencies { Seq( libraryDependencies ++= Seq( ("org.apache.hbase" % "hbase-shaded-client" % hbaseVersion).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2, + "slf4j-log4j12"), ("org.apache.hbase" % "hbase-common" % hbaseVersion).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2, + "slf4j-log4j12"), ("org.apache.hadoop" % "hadoop-common" % hadoopVersion).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2, + "slf4j-log4j12"), ("org.apache.hadoop" % "hadoop-mapreduce-client-core" % hadoopVersion).exclude("log4j", "log4j").exclude( - "org.slf4j", "slf4j-log4j12"), // ApacheV2, - "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html - )) + "org.slf4j", "slf4j-log4j12"), + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) } val HadoopVersion = "3.2.1" val Hdfs = Seq( libraryDependencies ++= Seq( ("org.apache.hadoop" % "hadoop-client" % HadoopVersion).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2 - "org.typelevel" %% "cats-core" % "2.0.0", // MIT, + "slf4j-log4j12"), + "org.typelevel" %% "cats-core" % "2.0.0", ("org.apache.hadoop" % "hadoop-hdfs" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2 + "slf4j-log4j12"), ("org.apache.hadoop" % "hadoop-common" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2 + "slf4j-log4j12"), ("org.apache.hadoop" % "hadoop-minicluster" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2 - "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html - )) + "slf4j-log4j12"), + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val HuaweiPushKit = Seq( libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.jwt-scala" %% "jwt-spray-json" % "7.1.4" // ApacheV2 - ) ++ Mockito) + "com.github.jwt-scala" %% "jwt-spray-json" % "7.1.4") ++ Mockito) val InfluxDB = Seq( libraryDependencies ++= Seq( - "org.influxdb" % "influxdb-java" % InfluxDBJavaVersion // MIT - )) + "org.influxdb" % "influxdb-java" % InfluxDBJavaVersion)) val IronMq = Seq( libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, - "org.mdedetrich" %% "pekko-stream-circe" % "0.0.0+97-53ec124d-SNAPSHOT", // ApacheV2 - "org.mdedetrich" %% "pekko-http-circe" % "0.0.0+97-53ec124d-SNAPSHOT" // ApacheV2 - )) + "org.mdedetrich" %% "pekko-stream-circe" % "0.0.0+97-53ec124d-SNAPSHOT", + "org.mdedetrich" %% "pekko-http-circe" % "0.0.0+97-53ec124d-SNAPSHOT")) val Jms = Seq( libraryDependencies ++= Seq( - "javax.jms" % "jms" % "1.1" % Provided, // CDDL + GPLv2 - "com.ibm.mq" % "com.ibm.mq.allclient" % "9.2.5.0" % Test, // IBM International Program License Agreement 
https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqdev/maven/licenses/L-APIG-AZYF2E/LI_en.html - "org.apache.activemq" % "activemq-broker" % "5.16.4" % Test, // ApacheV2 - "org.apache.activemq" % "activemq-client" % "5.16.4" % Test, // ApacheV2 - "io.github.sullis" %% "jms-testkit" % "1.0.4" % Test // ApacheV2 - ) ++ Mockito, + "javax.jms" % "jms" % "1.1" % Provided, + "com.ibm.mq" % "com.ibm.mq.allclient" % "9.2.5.0" % Test, + "org.apache.activemq" % "activemq-broker" % "5.16.4" % Test, + "org.apache.activemq" % "activemq-client" % "5.16.4" % Test, + "io.github.sullis" %% "jms-testkit" % "1.0.4" % Test) ++ Mockito, // Having JBoss as a first resolver is a workaround for https://github.com/coursier/coursier/issues/200 externalResolvers := ("jboss".at( "https://repository.jboss.org/nexus/content/groups/public")) +: externalResolvers.value) val JsonStreaming = Seq( libraryDependencies ++= Seq( - "com.github.jsurfer" % "jsurfer-jackson" % "1.6.0" // MIT - ) ++ JacksonDatabindDependencies) + "com.github.jsurfer" % "jsurfer-jackson" % "1.6.0") ++ JacksonDatabindDependencies) val Kinesis = Seq( libraryDependencies ++= Seq( - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, // ApacheV2 + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll(ExclusionRule( organization = "org.apache.pekko"))) ++ Seq( - "software.amazon.awssdk" % "kinesis" % AwsSdk2Version, // ApacheV2 - "software.amazon.awssdk" % "firehose" % AwsSdk2Version, // ApacheV2 - "software.amazon.kinesis" % "amazon-kinesis-client" % "2.4.0" // ApacheV2 - ).map( + "software.amazon.awssdk" % "kinesis" % AwsSdk2Version, + "software.amazon.awssdk" % "firehose" % AwsSdk2Version, + "software.amazon.kinesis" % "amazon-kinesis-client" % "2.4.0").map( _.excludeAll( ExclusionRule("software.amazon.awssdk", "apache-client"), ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) @@ -336,34 +311,29 @@ object Dependencies { val KuduVersion = "1.7.1" val Kudu = Seq( libraryDependencies ++= Seq( - "org.apache.kudu" % "kudu-client-tools" % KuduVersion, // ApacheV2 - "org.apache.kudu" % "kudu-client" % KuduVersion % Test // ApacheV2 - )) + "org.apache.kudu" % "kudu-client-tools" % KuduVersion, + "org.apache.kudu" % "kudu-client" % KuduVersion % Test)) val MongoDb = Seq( libraryDependencies ++= Seq( - "org.mongodb.scala" %% "mongo-scala-driver" % "4.4.0" // ApacheV2 - )) + "org.mongodb.scala" %% "mongo-scala-driver" % "4.4.0")) val Mqtt = Seq( libraryDependencies ++= Seq( - "org.eclipse.paho" % "org.eclipse.paho.client.mqttv3" % "1.2.5" // Eclipse Public License 1.0 - )) + "org.eclipse.paho" % "org.eclipse.paho.client.mqttv3" % "1.2.5")) val MqttStreaming = Seq( libraryDependencies ++= Seq( - "org.apache.pekko" %% "pekko-actor-typed" % PekkoVersion, // ApacheV2 - "org.apache.pekko" %% "pekko-actor-testkit-typed" % PekkoVersion % Test, // ApacheV2 - "org.apache.pekko" %% "pekko-stream-typed" % PekkoVersion, // ApacheV2 - "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion % Test // ApacheV2 - )) + "org.apache.pekko" %% "pekko-actor-typed" % PekkoVersion, + "org.apache.pekko" %% "pekko-actor-testkit-typed" % PekkoVersion % Test, + "org.apache.pekko" %% "pekko-stream-typed" % PekkoVersion, + "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion % Test)) val OrientDB = Seq( libraryDependencies ++= Seq( ("com.orientechnologies" % "orientdb-graphdb" % "3.1.9") .exclude("com.tinkerpop.blueprints", "blueprints-core"), 
- "com.orientechnologies" % "orientdb-object" % "3.1.9" // ApacheV2 - )) + "com.orientechnologies" % "orientdb-object" % "3.1.9")) val PravegaVersion = "0.10.2" val PravegaVersionForDocs = s"v$PravegaVersion" @@ -372,8 +342,7 @@ object Dependencies { Seq( libraryDependencies ++= Seq( "io.pravega" % "pravega-client" % PravegaVersion, - "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html - )) + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) } val Reference = Seq( @@ -387,8 +356,8 @@ object Dependencies { "org.apache.pekko" %% "pekko-http-xml" % PekkoHttpVersion, "software.amazon.awssdk" % "auth" % AwsSdk2Version, // in-memory filesystem for file related tests - "com.google.jimfs" % "jimfs" % "1.2" % Test, // ApacheV2 - "com.github.tomakehurst" % "wiremock-jre8" % "2.32.0" % Test, // ApacheV2 + "com.google.jimfs" % "jimfs" % "1.2" % Test, + "com.github.tomakehurst" % "wiremock-jre8" % "2.32.0" % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, "org.scalatestplus" %% scalaTestScalaCheckArtifact % scalaTestScalaCheckVersion % Test, "com.markatta" %% "futiles" % "2.0.2" % Test)) @@ -409,58 +378,47 @@ object Dependencies { val SlickVersion = "3.3.3" val Slick = Seq( libraryDependencies ++= Seq( - "com.typesafe.slick" %% "slick" % SlickVersion, // BSD 2-clause "Simplified" License - "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion, // BSD 2-clause "Simplified" License - "com.h2database" % "h2" % "2.1.210" % Test // Eclipse Public License 1.0 - )) + "com.typesafe.slick" %% "slick" % SlickVersion, + "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion, + "com.h2database" % "h2" % "2.1.210" % Test)) val Eventbridge = Seq( libraryDependencies ++= Seq( - ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( // ApacheV2 - + ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( ExclusionRule(organization = "org.apache.pekko")), - ("software.amazon.awssdk" % "eventbridge" % AwsSdk2Version).excludeAll( // ApacheV2 - + ("software.amazon.awssdk" % "eventbridge" % AwsSdk2Version).excludeAll( ExclusionRule("software.amazon.awssdk", "apache-client"), ExclusionRule("software.amazon.awssdk", "netty-nio-client")), - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion // ApacheV2 - ) ++ Mockito) + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion) ++ Mockito) val Sns = Seq( libraryDependencies ++= Seq( - ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( // ApacheV2 - + ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( ExclusionRule(organization = "org.apache.pekko")), - ("software.amazon.awssdk" % "sns" % AwsSdk2Version).excludeAll( // ApacheV2 - + ("software.amazon.awssdk" % "sns" % AwsSdk2Version).excludeAll( ExclusionRule("software.amazon.awssdk", "apache-client"), ExclusionRule("software.amazon.awssdk", "netty-nio-client")), - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion // ApacheV2 - ) ++ Mockito) + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion) ++ Mockito) val SolrjVersion = "7.7.3" val SolrVersionForDocs = "7_7" val Solr = Seq( libraryDependencies ++= Seq( - "org.apache.solr" % "solr-solrj" % SolrjVersion, // ApacheV2 + "org.apache.solr" % "solr-solrj" % SolrjVersion, ("org.apache.solr" % "solr-test-framework" % SolrjVersion % Test).exclude("org.apache.logging.log4j", - "log4j-slf4j-impl"), // ApacheV2 - "org.slf4j" % "log4j-over-slf4j" % 
log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html - ), - resolvers += ("restlet".at("https://maven.restlet.talend.com"))) + "log4j-slf4j-impl"), + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test), + resolvers += "restlet".at("https://maven.restlet.talend.com")) val Sqs = Seq( libraryDependencies ++= Seq( - ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( // ApacheV2 - + ("com.github.pjfanning" %% "aws-spi-pekko-http" % AwsSpiPekkoHttpVersion).excludeAll( ExclusionRule(organization = "org.apache.pekko")), - ("software.amazon.awssdk" % "sqs" % AwsSdk2Version).excludeAll( // ApacheV2 - + ("software.amazon.awssdk" % "sqs" % AwsSdk2Version).excludeAll( ExclusionRule("software.amazon.awssdk", "apache-client"), ExclusionRule("software.amazon.awssdk", "netty-nio-client")), - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, // ApacheV2 - "org.mockito" % "mockito-inline" % mockitoVersion % Test // MIT - ) ++ Mockito) + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, + "org.mockito" % "mockito-inline" % mockitoVersion % Test) ++ Mockito) val Sse = Seq( libraryDependencies ++= Seq( @@ -470,12 +428,10 @@ object Dependencies { val UnixDomainSocket = Seq( libraryDependencies ++= Seq( "com.github.jnr" % "jffi" % "1.3.1", // classifier "complete", // Is the classifier needed anymore? - "com.github.jnr" % "jnr-unixsocket" % "0.38.5" // BSD/ApacheV2/CPL/MIT as per https://github.com/akka/alpakka/issues/620#issuecomment-348727265 - )) + "com.github.jnr" % "jnr-unixsocket" % "0.38.5")) val Xml = Seq( libraryDependencies ++= Seq( - "com.fasterxml" % "aalto-xml" % "1.2.2" // ApacheV2 - )) + "com.fasterxml" % "aalto-xml" % "1.2.2")) } diff --git a/project/LicenseReport.scala b/project/LicenseReport.scala new file mode 100644 index 000000000..c9616ee94 --- /dev/null +++ b/project/LicenseReport.scala @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import sbt._ +import sbtlicensereport.SbtLicenseReport +import sbtlicensereport.SbtLicenseReport.autoImportImpl._ +import sbtlicensereport.license.{ DepModuleInfo, LicenseInfo, MarkDown } + +object LicenseReport extends AutoPlugin { + + override lazy val projectSettings = Seq( + licenseReportTypes := Seq(MarkDown), + licenseReportMakeHeader := (language => language.header1("License Report")), + licenseConfigurations := Set("compile", "test", "provided"), + licenseDepExclusions := { + case DepModuleInfo("org.apache.pekko", _, _) => true // Inter pekko project dependencies are pointless + case DepModuleInfo(_, "scala-library", _) => true // Scala library is part of Scala language + case DepModuleInfo(_, "scala-reflect", _) => true // Scala reflect is part of Scala language + }, + licenseOverrides := { + // This is here because the License URI for the unicode license isn't correct in POM + case DepModuleInfo("com.ibm.icu", "icu4j", _) => LicenseInfo.Unicode + // The asm # asm artifacts are missing license in POM and the org.ow2.asm # asm-* artifacts don't + // point to correct license page + case dep: DepModuleInfo if dep.organization.endsWith("asm") && dep.name.startsWith("asm") => + LicenseInfo( + LicenseCategory.BSD, + "BSD 3-Clause", + "https://asm.ow2.io/license.html") + // Missing license in POM + case DepModuleInfo("dom4j", "dom4j", _) => LicenseInfo( + LicenseCategory("dom4j"), + "dom4j", + "https://raw.githubusercontent.com/dom4j/dom4j/master/LICENSE") + case DepModuleInfo("org.hibernate.javax.persistence", "hibernate-jpa-2.0-api", _) => LicenseInfo.EDL + case DepModuleInfo("commons-beanutils", "commons-beanutils", _) => LicenseInfo.APACHE2 + case DepModuleInfo("io.netty", "netty-tcnative-boringssl-static", _) => LicenseInfo.APACHE2 + case DepModuleInfo("org.apache.zookeeper", "zookeeper", _) => LicenseInfo.APACHE2 + case DepModuleInfo("org.codehaus.jettison", "jettison", _) => LicenseInfo.APACHE2 + case dep: DepModuleInfo if dep.organization.startsWith("javax") => LicenseInfo.CDDL_GPL + }, + licenseReportColumns := Seq( + Column.Category, + Column.License, + Column.Dependency, + Column.OriginatingArtifactName, + Column.Configuration)) + + override def requires = plugins.JvmPlugin && SbtLicenseReport + + override def trigger = allRequirements + +} diff --git a/project/plugins.sbt b/project/plugins.sbt index 94d398e76..baa6fdd06 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -12,6 +12,7 @@ addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.1.1") addSbtPlugin("net.bzzt" % "sbt-reproducible-builds" % "0.31") addSbtPlugin("org.mdedetrich" % "sbt-apache-sonatype" % "0.1.10") addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.8") +addSbtPlugin("com.github.sbt" % "sbt-license-report" % "1.6.1") // discipline addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.9.0") addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6") From 7d704044c3e16cd53f9a46d6f106c6e767cca828 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Thu, 3 Aug 2023 12:03:37 +0100 Subject: [PATCH 04/90] pekko-http 1.0.0 (#209) * pekko-http 1.0.0 * Update Dependencies.scala * Update plugins.sbt * Update Dependencies.scala --- project/Dependencies.scala | 12 ++++++------ project/build.properties | 2 +- project/plugins.sbt | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 0e739f797..d5630b7b9 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -18,16 +18,16 @@ object Dependencies { val Scala212 
= "2.12.17" val ScalaVersions = Seq(Scala213, Scala212) - val PekkoVersion = "1.0.0" + val PekkoVersion = "1.0.1" val PekkoBinaryVersion = "current" val InfluxDBJavaVersion = "2.15" val AwsSdk2Version = "2.17.113" - val AwsSpiPekkoHttpVersion = "0.0.11+95-02ec3f55-SNAPSHOT" + val AwsSpiPekkoHttpVersion = "0.1.0" // Sync with plugins.sbt val PekkoGrpcBinaryVersion = "current" - val PekkoHttpVersion = "0.0.0+4468-963bd592-SNAPSHOT" + val PekkoHttpVersion = "1.0.0" val PekkoHttpBinaryVersion = "current" val ScalaTestVersion = "3.2.14" val TestContainersScalaTestVersion = "0.40.14" @@ -127,7 +127,7 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion % Test, - "org.apache.pekko" %% "pekko-connectors-kafka" % "0.0.0+1761-2291eac2-SNAPSHOT" % Test, + "org.apache.pekko" %% "pekko-connectors-kafka" % "1.0.0" % Test, "junit" % "junit" % "4.13.2" % Test, "org.scalatest" %% "scalatest" % ScalaTestVersion % Test)) @@ -278,8 +278,8 @@ object Dependencies { val IronMq = Seq( libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, - "org.mdedetrich" %% "pekko-stream-circe" % "0.0.0+97-53ec124d-SNAPSHOT", - "org.mdedetrich" %% "pekko-http-circe" % "0.0.0+97-53ec124d-SNAPSHOT")) + "org.mdedetrich" %% "pekko-stream-circe" % "1.0.0", + "org.mdedetrich" %% "pekko-http-circe" % "1.0.0")) val Jms = Seq( libraryDependencies ++= Seq( diff --git a/project/build.properties b/project/build.properties index 875b706a8..52413ab79 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.9.2 +sbt.version=1.9.3 diff --git a/project/plugins.sbt b/project/plugins.sbt index baa6fdd06..81006e2bb 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -37,6 +37,6 @@ addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1") // Pekko gRPC -- sync with version in Dependencies.scala:19 -addSbtPlugin("org.apache.pekko" % "sbt-pekko-grpc" % "0.0.0-64-719d069a-SNAPSHOT") +addSbtPlugin("org.apache.pekko" % "sbt-pekko-grpc" % "0.0.0-73-c03eff2b-SNAPSHOT") // templating addSbtPlugin("io.spray" % "sbt-boilerplate" % "0.6.1") From 5963645ddcfbe7b6a6fde67ec3472626449119f3 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 6 Aug 2023 15:13:34 +0100 Subject: [PATCH 05/90] acknowledge use of aws code (#213) * acknowledge use of aws code * Update CopyrightHeader.scala --- LICENSE | 10 + NOTICE | 11 + build.sbt | 3 +- legal/PekkoConnectorsNotice.txt | 11 + legal/S3License.txt | 211 ++++++++++++++++++ legal/S3Notice.txt | 22 ++ legal/StandardLicense.txt | 201 +++++++++++++++++ project/CopyrightHeader.scala | 1 + project/MetaInfLicenseNoticeCopy.scala | 6 + .../pekko/stream/connectors/s3/Utils.scala | 14 +- 10 files changed, 485 insertions(+), 5 deletions(-) create mode 100644 legal/PekkoConnectorsNotice.txt create mode 100644 legal/S3License.txt create mode 100644 legal/S3Notice.txt create mode 100644 legal/StandardLicense.txt diff --git a/LICENSE b/LICENSE index 20e4bd856..d09077e81 100644 --- a/LICENSE +++ b/LICENSE @@ -199,3 +199,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +--------------- + +pekko-connectors-s3 contains code aws-sdk-java . +This code was released under an Apache 2.0 license. 
+ + - s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala + +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/NOTICE b/NOTICE index be9af4dbd..7d12ff7a5 100644 --- a/NOTICE +++ b/NOTICE @@ -9,3 +9,14 @@ Copyright (C) 2009-2022 Lightbend Inc. Apache Pekko Connectors is derived from Alpakka 4.0.x, the last version that was distributed under the Apache License, Version 2.0 License. + +--------------- + +pekko-connectors-s3 contains code aws-sdk-java . +This code was released under an Apache 2.0 license. + +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). diff --git a/build.sbt b/build.sbt index 16ae3b173..730156ec7 100644 --- a/build.sbt +++ b/build.sbt @@ -305,7 +305,8 @@ lazy val orientdb = lazy val reference = internalProject("reference", Dependencies.Reference) .dependsOn(testkit % Test) -lazy val s3 = pekkoConnectorProject("s3", "aws.s3", Dependencies.S3) +lazy val s3 = pekkoConnectorProject("s3", "aws.s3", Dependencies.S3, + MetaInfLicenseNoticeCopy.s3Settings) lazy val pravega = pekkoConnectorProject( "pravega", diff --git a/legal/PekkoConnectorsNotice.txt b/legal/PekkoConnectorsNotice.txt new file mode 100644 index 000000000..be9af4dbd --- /dev/null +++ b/legal/PekkoConnectorsNotice.txt @@ -0,0 +1,11 @@ +Apache Pekko-Connectors +Copyright 2022, 2023 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (https://www.apache.org/). + +This product contains significant parts that were originally based on software from Lightbend (Akka ). +Copyright (C) 2009-2022 Lightbend Inc. + +Apache Pekko Connectors is derived from Alpakka 4.0.x, the last version that was distributed under the +Apache License, Version 2.0 License. diff --git a/legal/S3License.txt b/legal/S3License.txt new file mode 100644 index 000000000..d09077e81 --- /dev/null +++ b/legal/S3License.txt @@ -0,0 +1,211 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--------------- + +pekko-connectors-s3 contains code aws-sdk-java . +This code was released under an Apache 2.0 license. + + - s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala + +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/legal/S3Notice.txt b/legal/S3Notice.txt new file mode 100644 index 000000000..7d12ff7a5 --- /dev/null +++ b/legal/S3Notice.txt @@ -0,0 +1,22 @@ +Apache Pekko-Connectors +Copyright 2022, 2023 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (https://www.apache.org/). + +This product contains significant parts that were originally based on software from Lightbend (Akka ). +Copyright (C) 2009-2022 Lightbend Inc. + +Apache Pekko Connectors is derived from Alpakka 4.0.x, the last version that was distributed under the +Apache License, Version 2.0 License. + +--------------- + +pekko-connectors-s3 contains code aws-sdk-java . 
+This code was released under an Apache 2.0 license. + +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). diff --git a/legal/StandardLicense.txt b/legal/StandardLicense.txt new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/legal/StandardLicense.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/project/CopyrightHeader.scala b/project/CopyrightHeader.scala index f72e2a48d..5b53d1596 100644 --- a/project/CopyrightHeader.scala +++ b/project/CopyrightHeader.scala @@ -118,6 +118,7 @@ trait CopyrightHeader extends AutoPlugin { private def isApacheCopyrighted(text: String): Boolean = StringUtils.containsIgnoreCase(text, "licensed to the apache software foundation (asf)") || StringUtils.containsIgnoreCase(text, "www.apache.org/licenses/license-2.0") || + StringUtils.contains(text, "Apache License, Version 2.0") || StringUtils.contains(text, "Apache-2.0") private def isLightbendCopyrighted(text: String): Boolean = diff --git a/project/MetaInfLicenseNoticeCopy.scala b/project/MetaInfLicenseNoticeCopy.scala index 019e502d7..d210ea72e 100644 --- a/project/MetaInfLicenseNoticeCopy.scala +++ b/project/MetaInfLicenseNoticeCopy.scala @@ -28,8 +28,14 @@ object MetaInfLicenseNoticeCopy extends AutoPlugin { private lazy val baseDir = LocalRootProject / baseDirectory override lazy val projectSettings = Seq( + apacheSonatypeLicenseFile := baseDir.value / "legal" / "StandardLicense.txt", + apacheSonatypeNoticeFile := baseDir.value / "legal" / "PekkoConnectorsNotice.txt", apacheSonatypeDisclaimerFile := Some(baseDir.value / "DISCLAIMER")) + lazy val s3Settings = Seq( + apacheSonatypeLicenseFile := baseDir.value / "legal" / "S3License.txt", + apacheSonatypeNoticeFile := baseDir.value / "legal" / "S3Notice.txt") + override def trigger = allRequirements override def requires = ApacheSonatypePlugin diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala index 64a25e00a..a290ec1a8 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala @@ -1,10 +1,16 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * license agreements; and to You under the Apache License, version 2.0: + * Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * https://www.apache.org/licenses/LICENSE-2.0 + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * This file is part of the Apache Pekko project, which was derived from Akka. + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ /* From 3b29beabcc54db0791539776159f238e8ad91e45 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 6 Aug 2023 18:13:01 +0100 Subject: [PATCH 06/90] add paho license (#214) * add paho license * scalafmt --- LICENSE | 25 +- NOTICE | 2 +- build.sbt | 4 +- legal/MqttStreamingLicense.txt | 224 ++++++++++++++++++ legal/S3License.txt | 2 +- legal/S3Notice.txt | 2 +- .../mqtt/streaming/impl/RequestState.scala | 21 +- project/MetaInfLicenseNoticeCopy.scala | 3 + 8 files changed, 277 insertions(+), 6 deletions(-) create mode 100644 legal/MqttStreamingLicense.txt diff --git a/LICENSE b/LICENSE index d09077e81..dff33d762 100644 --- a/LICENSE +++ b/LICENSE @@ -202,7 +202,30 @@ --------------- -pekko-connectors-s3 contains code aws-sdk-java . +pekko-mqtt-streaming contains code from paho.mqtt.java . +This code was released under a dual license: +Eclipse Public License version 2.0 and Eclipse Distribution License. +We choose to use the code under the Eclipse Distribution License. + + - mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala + +Eclipse Public License - v 2.0 + +This program and the accompanying materials +are made available under the terms of the Eclipse Public License v2.0 +and Eclipse Distribution License v1.0 which accompany this distribution. + +The Eclipse Public License is available at + https://www.eclipse.org/legal/epl-2.0/ +and the Eclipse Distribution License is available at + https://www.eclipse.org/org/documents/edl-v10.php + +For an explanation of what dual-licensing means to you, see: +https://www.eclipse.org/legal/eplfaq.php#DUALLIC + +--------------- + +pekko-connectors-s3 contains code from aws-sdk-java . This code was released under an Apache 2.0 license. - s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala diff --git a/NOTICE b/NOTICE index 7d12ff7a5..598d8a966 100644 --- a/NOTICE +++ b/NOTICE @@ -12,7 +12,7 @@ Apache License, Version 2.0 License. --------------- -pekko-connectors-s3 contains code aws-sdk-java . +pekko-connectors-s3 contains code from aws-sdk-java . This code was released under an Apache 2.0 license. AWS SDK for Java diff --git a/build.sbt b/build.sbt index 730156ec7..4ccaa811b 100644 --- a/build.sbt +++ b/build.sbt @@ -288,7 +288,9 @@ lazy val mongodb = pekkoConnectorProject("mongodb", "mongodb", Dependencies.Mong lazy val mqtt = pekkoConnectorProject("mqtt", "mqtt", Dependencies.Mqtt) lazy val mqttStreaming = - pekkoConnectorProject("mqtt-streaming", "mqttStreaming", Dependencies.MqttStreaming) + pekkoConnectorProject("mqtt-streaming", "mqttStreaming", Dependencies.MqttStreaming, + MetaInfLicenseNoticeCopy.mqttStreamingSettings) + lazy val mqttStreamingBench = internalProject("mqtt-streaming-bench") .enablePlugins(JmhPlugin) .dependsOn(mqtt, mqttStreaming) diff --git a/legal/MqttStreamingLicense.txt b/legal/MqttStreamingLicense.txt new file mode 100644 index 000000000..9106a9ee9 --- /dev/null +++ b/legal/MqttStreamingLicense.txt @@ -0,0 +1,224 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--------------- + +pekko-mqtt-streaming contains code from paho.mqtt.java . 
+This code was released under a dual license: +Eclipse Public License version 2.0 and Eclipse Distribution License. +We choose to use the code under the Eclipse Distribution License. + + - mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala + +Eclipse Public License - v 2.0 + +This program and the accompanying materials +are made available under the terms of the Eclipse Public License v2.0 +and Eclipse Distribution License v1.0 which accompany this distribution. + +The Eclipse Public License is available at + https://www.eclipse.org/legal/epl-2.0/ +and the Eclipse Distribution License is available at + https://www.eclipse.org/org/documents/edl-v10.php + +For an explanation of what dual-licensing means to you, see: +https://www.eclipse.org/legal/eplfaq.php#DUALLIC diff --git a/legal/S3License.txt b/legal/S3License.txt index d09077e81..e64484eab 100644 --- a/legal/S3License.txt +++ b/legal/S3License.txt @@ -202,7 +202,7 @@ --------------- -pekko-connectors-s3 contains code aws-sdk-java . +pekko-connectors-s3 contains code from aws-sdk-java . This code was released under an Apache 2.0 license. - s3/src/main/scala/org/apache/pekko/stream/connectors/s3/Utils.scala diff --git a/legal/S3Notice.txt b/legal/S3Notice.txt index 7d12ff7a5..598d8a966 100644 --- a/legal/S3Notice.txt +++ b/legal/S3Notice.txt @@ -12,7 +12,7 @@ Apache License, Version 2.0 License. --------------- -pekko-connectors-s3 contains code aws-sdk-java . +pekko-connectors-s3 contains code from aws-sdk-java . This code was released under an Apache 2.0 license. AWS SDK for Java diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala index 22bbdd015..da54ba8e3 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala @@ -11,6 +11,22 @@ * Copyright (C) since 2016 Lightbend Inc. */ +/** + * Copyright (c) 2009, 2014 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v2.0 + * and Eclipse Distribution License v1.0 which accompany this distribution. 
+ * + * The Eclipse Public License is available at + * https://www.eclipse.org/legal/epl-2.0 + * and the Eclipse Distribution License is available at + * https://www.eclipse.org/org/documents/edl-v10.php + * + * Contributors: + * Dave Locke - initial API and implementation and/or initial documentation + */ + package org.apache.pekko.stream.connectors.mqtt.streaming package impl @@ -615,7 +631,10 @@ object Topics { * 4.7 Topic Names and Topic Filters * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html * - * Inspired by https://github.com/eclipse/paho.mqtt.java/blob/master/org.eclipse.paho.client.mqttv3/src/main/java/org/eclipse/paho/client/mqttv3/MqttTopic.java#L240 + * Inspired by https://github.com/eclipse/paho.mqtt.java/blob/master/org.eclipse.paho.client.mqttv3/src/main/java/org/eclipse/paho/client/mqttv3/MqttTopic.java + * + * The Apache Pekko project chooses to use the paho.mqtt.java inspired code under the + * Eclipse Distribution License */ def filter(topicFilterName: String, topicName: String): Boolean = { @tailrec diff --git a/project/MetaInfLicenseNoticeCopy.scala b/project/MetaInfLicenseNoticeCopy.scala index d210ea72e..f2f7d7138 100644 --- a/project/MetaInfLicenseNoticeCopy.scala +++ b/project/MetaInfLicenseNoticeCopy.scala @@ -32,6 +32,9 @@ object MetaInfLicenseNoticeCopy extends AutoPlugin { apacheSonatypeNoticeFile := baseDir.value / "legal" / "PekkoConnectorsNotice.txt", apacheSonatypeDisclaimerFile := Some(baseDir.value / "DISCLAIMER")) + lazy val mqttStreamingSettings = Seq( + apacheSonatypeLicenseFile := baseDir.value / "legal" / "MqttStreamingLicense.txt") + lazy val s3Settings = Seq( apacheSonatypeLicenseFile := baseDir.value / "legal" / "S3License.txt", apacheSonatypeNoticeFile := baseDir.value / "legal" / "S3Notice.txt") From 158a03247d7c1569768c75a8e083cede461caa27 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 6 Aug 2023 22:48:22 +0100 Subject: [PATCH 07/90] sbt-source-dist 0.1.10 (#212) --- project/plugins.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index 81006e2bb..6279dc6c9 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -11,7 +11,7 @@ addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.4.3") addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.1.1") addSbtPlugin("net.bzzt" % "sbt-reproducible-builds" % "0.31") addSbtPlugin("org.mdedetrich" % "sbt-apache-sonatype" % "0.1.10") -addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.8") +addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.10") addSbtPlugin("com.github.sbt" % "sbt-license-report" % "1.6.1") // discipline addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.9.0") From 703e9ccaf60530356ef665c8d61aee2b9ea40a42 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 11 Aug 2023 12:47:35 +0200 Subject: [PATCH 08/90] Update to latest pekko-grpc-sbt-plugin snapshot --- project/plugins.sbt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index 6279dc6c9..bff2d3ae3 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -36,7 +36,7 @@ addSbtPlugin(("com.lightbend.paradox" % "sbt-paradox-project-info" % "2.0.0").fo addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1") -// Pekko gRPC -- sync with version in Dependencies.scala:19 -addSbtPlugin("org.apache.pekko" % "sbt-pekko-grpc" % 
"0.0.0-73-c03eff2b-SNAPSHOT") +// Pekko gRPC -- sync with version in Dependencies.scala:29 +addSbtPlugin("org.apache.pekko" % "pekko-grpc-sbt-plugin" % "1.0.0-RC1-3-ae23c14d-SNAPSHOT") // templating addSbtPlugin("io.spray" % "sbt-boilerplate" % "0.6.1") From 1a543d61a6e3577f88ba401198077bdca036f222 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 11 Aug 2023 18:33:17 +0200 Subject: [PATCH 09/90] Update scalafmt --- .github/workflows/format.yml | 2 +- .scalafmt.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 9d330b61d..d90635f52 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -20,5 +20,5 @@ jobs: - name: Check project is formatted uses: jrouly/scalafmt-native-action@v2 with: - version: '3.7.1' + version: '3.7.11' arguments: '--list --mode diff-ref=origin/main' diff --git a/.scalafmt.conf b/.scalafmt.conf index 1aae8a44a..23ae71227 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,4 +1,4 @@ -version = 3.7.1 +version = 3.7.11 runner.dialect = scala213 project.git = true style = defaultWithAlign From 5e339991b14da475b382dcbb192efc391aab21c1 Mon Sep 17 00:00:00 2001 From: Auto Format Date: Fri, 11 Aug 2023 18:34:01 +0200 Subject: [PATCH 10/90] format source with scalafmt, #219 --- jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala index aef4deb58..42906d2b3 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala @@ -221,7 +221,7 @@ class JmsConnectorsSpec extends JmsSpec { // #jms-source control.shutdown() - // #jms-source + // #jms-source } "publish and consume JMS text messages" in withConnectionFactory() { connectionFactory => From 78f2862a52ba0c700cba39284a849833bda92faf Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sat, 12 Aug 2023 09:52:18 +0200 Subject: [PATCH 11/90] Add scalafmt to .git-blame-ignore-revs --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 2c1b9e72b..fba88e3a3 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,5 +1,6 @@ # scalafmt 433daa5c5b0b2c3f85c6f1c7e62a4f4171856468 +5e339991b14da475b382dcbb192efc391aab21c1 # manual 1225828a913a61eb48123410077123c737c1b84e From 3f586ac4d2334634f5f8800254cf1327a009593b Mon Sep 17 00:00:00 2001 From: Sergey Gornostaev Date: Mon, 14 Aug 2023 18:11:46 +0800 Subject: [PATCH 12/90] FTP: Add a setting to enable or disable automatic server encoding detection (#221) --- .../connectors/ftp/impl/FtpOperations.scala | 4 ++++ .../connectors/ftp/impl/FtpsOperations.scala | 4 ++++ .../pekko/stream/connectors/ftp/model.scala | 18 ++++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala index 36ac044fb..e4c46e45d 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala @@ -28,6 +28,10 @@ private[ftp] trait FtpOperations extends CommonFtpOperations { _: FtpLike[FTPCli def connect(connectionSettings: FtpSettings)(implicit ftpClient: FTPClient): 
Try[Handler] = Try { connectionSettings.proxy.foreach(ftpClient.setProxy) + if (ftpClient.getAutodetectUTF8() != connectionSettings.autodetectUTF8) { + ftpClient.setAutodetectUTF8(connectionSettings.autodetectUTF8) + } + try { ftpClient.connect(connectionSettings.host, connectionSettings.port) } catch { diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala index b1bd6ffc1..7c672f745 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala @@ -31,6 +31,10 @@ private[ftp] trait FtpsOperations extends CommonFtpOperations { Try { connectionSettings.proxy.foreach(ftpClient.setProxy) + if (ftpClient.getAutodetectUTF8() != connectionSettings.autodetectUTF8) { + ftpClient.setAutodetectUTF8(connectionSettings.autodetectUTF8) + } + ftpClient.connect(connectionSettings.host, connectionSettings.port) connectionSettings.configureConnection(ftpClient) diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala index 4be0747f2..e489d09dd 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala @@ -68,6 +68,8 @@ abstract sealed class FtpFileSettings extends RemoteFileSettings { * @param credentials credentials (username and password) * @param binary specifies the file transfer mode, BINARY or ASCII. Default is ASCII (false) * @param passiveMode specifies whether to use passive mode connections. Default is active mode (false) + * @param autodetectUTF8 enables or disables automatic server encoding detection (only UTF-8 supported). + * Disabled by default (false). * @param configureConnection A function which will be called after connecting to the server. Use this for * any custom configuration required by the server you are connecting to. 
* @param proxy An optional proxy to use when connecting with these settings @@ -78,6 +80,7 @@ final class FtpSettings private ( val credentials: FtpCredentials, val binary: Boolean, val passiveMode: Boolean, + val autodetectUTF8: Boolean, val configureConnection: FTPClient => Unit, val proxy: Option[Proxy]) extends FtpFileSettings { @@ -87,6 +90,8 @@ final class FtpSettings private ( def withBinary(value: Boolean): FtpSettings = if (binary == value) this else copy(binary = value) def withPassiveMode(value: Boolean): FtpSettings = if (passiveMode == value) this else copy(passiveMode = value) + def withAutodetectUTF8(value: Boolean): FtpSettings = + if (autodetectUTF8 == value) this else copy(autodetectUTF8 = value) def withProxy(value: Proxy): FtpSettings = copy(proxy = Some(value)) /** @@ -110,6 +115,7 @@ final class FtpSettings private ( credentials: FtpCredentials = credentials, binary: Boolean = binary, passiveMode: Boolean = passiveMode, + autodetectUTF8: Boolean = autodetectUTF8, configureConnection: FTPClient => Unit = configureConnection, proxy: Option[Proxy] = proxy): FtpSettings = new FtpSettings( host = host, @@ -117,6 +123,7 @@ final class FtpSettings private ( credentials = credentials, binary = binary, passiveMode = passiveMode, + autodetectUTF8 = autodetectUTF8, configureConnection = configureConnection, proxy = proxy) @@ -127,6 +134,7 @@ final class FtpSettings private ( s"credentials=$credentials," + s"binary=$binary," + s"passiveMode=$passiveMode," + + s"autodetectUTF8=$autodetectUTF8" + s"configureConnection=$configureConnection," + s"proxy=$proxy)" } @@ -146,6 +154,7 @@ object FtpSettings { credentials = FtpCredentials.AnonFtpCredentials, binary = false, passiveMode = false, + autodetectUTF8 = false, configureConnection = _ => (), proxy = None) @@ -162,6 +171,8 @@ object FtpSettings { * @param credentials credentials (username and password) * @param binary specifies the file transfer mode, BINARY or ASCII. Default is ASCII (false) * @param passiveMode specifies whether to use passive mode connections. Default is active mode (false) + * @param autodetectUTF8 enables or disables automatic server encoding detection (only UTF-8 supported). + * Disabled by default (false). * @param configureConnection A function which will be called after connecting to the server. Use this for * any custom configuration required by the server you are connecting to. 
* @param proxy An optional proxy to use when connecting with these settings @@ -172,6 +183,7 @@ final class FtpsSettings private ( val credentials: FtpCredentials, val binary: Boolean, val passiveMode: Boolean, + val autodetectUTF8: Boolean, val configureConnection: FTPSClient => Unit, val proxy: Option[Proxy]) extends FtpFileSettings { @@ -181,6 +193,8 @@ final class FtpsSettings private ( def withBinary(value: Boolean): FtpsSettings = if (binary == value) this else copy(binary = value) def withPassiveMode(value: Boolean): FtpsSettings = if (passiveMode == value) this else copy(passiveMode = value) + def withAutodetectUTF8(value: Boolean): FtpsSettings = + if (autodetectUTF8 == value) this else copy(autodetectUTF8 = value) def withProxy(value: Proxy): FtpsSettings = copy(proxy = Some(value)) /** @@ -204,6 +218,7 @@ final class FtpsSettings private ( credentials: FtpCredentials = credentials, binary: Boolean = binary, passiveMode: Boolean = passiveMode, + autodetectUTF8: Boolean = autodetectUTF8, configureConnection: FTPSClient => Unit = configureConnection, proxy: Option[Proxy] = proxy): FtpsSettings = new FtpsSettings( host = host, @@ -211,6 +226,7 @@ final class FtpsSettings private ( credentials = credentials, binary = binary, passiveMode = passiveMode, + autodetectUTF8 = autodetectUTF8, configureConnection = configureConnection, proxy = proxy) @@ -221,6 +237,7 @@ final class FtpsSettings private ( s"credentials=$credentials," + s"binary=$binary," + s"passiveMode=$passiveMode," + + s"autodetectUTF8=$autodetectUTF8" + s"configureConnection=$configureConnection," + s"proxy=$proxy)" } @@ -240,6 +257,7 @@ object FtpsSettings { FtpCredentials.AnonFtpCredentials, binary = false, passiveMode = false, + autodetectUTF8 = false, configureConnection = _ => (), proxy = None) From 329eaa8501a5194655c2340fbb226124a57c9df7 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Tue, 1 Aug 2023 09:58:24 +0200 Subject: [PATCH 13/90] Add sbt-header to code style checks --- build.sbt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.sbt b/build.sbt index 4ccaa811b..a6b815c93 100644 --- a/build.sbt +++ b/build.sbt @@ -129,8 +129,8 @@ lazy val `pekko-connectors` = project crossScalaVersions := List() // workaround for https://github.com/sbt/sbt/issues/3465 ) -addCommandAlias("applyCodeStyle", ";scalafmtAll; scalafmtSbt; javafmtAll") -addCommandAlias("checkCodeStyle", ";scalafmtCheckAll; scalafmtSbtCheck; javafmtCheckAll") +addCommandAlias("applyCodeStyle", ";scalafmtAll; scalafmtSbt; javafmtAll; +headerCreateAll") +addCommandAlias("checkCodeStyle", "+headerCheckAll; ;scalafmtCheckAll; scalafmtSbtCheck; javafmtCheckAll") lazy val amqp = pekkoConnectorProject("amqp", "amqp", Dependencies.Amqp) From f0b637ddafe1d0e885191bb13c6ccf9dc8914daf Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 30 May 2023 14:44:18 +0100 Subject: [PATCH 14/90] try scala 3 (#123) --- .../amqp/impl/AmqpReplyToSinkStage.scala | 4 +- .../amqp/impl/AmqpRpcFlowStage.scala | 5 +- .../amqp/impl/AmqpSimpleFlowStage.scala | 4 +- .../connectors/amqp/scaladsl/AmqpSource.scala | 4 +- .../scala/docs/scaladsl/AmqpDocsSpec.scala | 2 +- .../stream/connectors/amqp/AmqpSpec.scala | 6 +- ...raphStageLogicConnectionShutdownSpec.scala | 9 +-- .../scaladsl/EventBridgePublisherSpec.scala | 2 +- .../docs/scaladsl/AwsLambdaFlowSpec.scala | 7 +- .../storagequeue/javadsl/AzureQueueSink.scala | 2 +- .../cassandra/javadsl/CassandraSession.scala | 2 +- .../scaladsl/CassandraLifecycle.scala | 5 +- 
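(Not part of any patch in this series — a minimal usage sketch of the new autodetectUTF8 flag introduced in PATCH 12/90 above. The host, credentials and path are placeholders; the surrounding calls are the existing Pekko Connectors FTP DSL.)

    import java.net.InetAddress
    import org.apache.pekko.stream.connectors.ftp.{ FtpCredentials, FtpSettings }
    import org.apache.pekko.stream.connectors.ftp.scaladsl.Ftp

    val ftpSettings = FtpSettings(InetAddress.getByName("ftp.example.com")) // placeholder host
      .withCredentials(FtpCredentials.create("user", "password"))           // placeholder credentials
      .withPassiveMode(true)
      .withAutodetectUTF8(true) // new setting from PATCH 12/90; defaults to false

    // Any of the existing FTP sources/sinks picks the flag up from the settings; the
    // underlying commons-net FTPClient then enables UTF-8 for servers that advertise it.
    val files = Ftp.ls("/", ftpSettings)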
.../couchbase/scaladsl/CouchbaseFlow.scala | 10 ++- .../couchbase/scaladsl/DiscoverySupport.scala | 6 +- .../connectors/csv/scaladsl/CsvBench.scala | 9 +-- .../test/scala/docs/scaladsl/CsvSpec.scala | 2 +- .../eip/scaladsl/PassThroughExamples.scala | 4 +- .../scala/docs/scaladsl/ExampleSpec.scala | 3 +- .../ElasticsearchConnectorBehaviour.scala | 6 +- .../scaladsl/ElasticsearchSpecUtils.scala | 2 +- .../docs/scaladsl/ElasticsearchV5Spec.scala | 18 ++--- .../docs/scaladsl/ElasticsearchV7Spec.scala | 18 ++--- .../OpensearchConnectorBehaviour.scala | 6 +- .../docs/scaladsl/OpensearchV1Spec.scala | 18 ++--- .../file/javadsl/LogRotatorSink.scala | 33 ++++---- .../file/scaladsl/LogRotatorSink.scala | 2 +- .../docs/scaladsl/LogRotatorSinkSpec.scala | 4 +- .../connectors/ftp/CommonFtpStageSpec.scala | 13 +-- .../scala/docs/scaladsl/GeodeBaseSpec.scala | 2 +- .../bigquery/BigQueryException.scala | 2 +- .../model/TableDataJsonProtocol.scala | 5 +- .../bigquery/scaladsl/BigQueryDatasets.scala | 4 +- .../bigquery/scaladsl/BigQueryJobs.scala | 8 +- .../bigquery/scaladsl/BigQueryQueries.scala | 11 +-- .../bigquery/scaladsl/BigQueryTableData.scala | 7 +- .../bigquery/scaladsl/BigQueryTables.scala | 4 +- .../scaladsl/BigQueryQueriesSpec.scala | 2 +- .../scala/docs/scaladsl/IntegrationSpec.scala | 2 +- .../googlecloud/pubsub/impl/PubSubApi.scala | 16 ++-- .../scala/docs/scaladsl/ExampleUsage.scala | 2 +- .../scala/docs/scaladsl/IntegrationSpec.scala | 2 +- .../googlecloud/pubsub/GooglePubSubSpec.scala | 4 +- .../pubsub/impl/PubSubApiSpec.scala | 2 +- .../googlecloud/storage/impl/Formats.scala | 17 ++-- .../storage/impl/GCStorageStream.scala | 8 +- .../googlecloud/storage/GCSExtSpec.scala | 2 +- .../storage/GCStorageExtSpec.scala | 2 +- .../storage/WithMaterializerGlobal.scala | 4 +- .../connectors/google/PaginatedRequest.scala | 5 +- .../connectors/google/ResumableUpload.scala | 9 ++- .../google/auth/GoogleComputeMetadata.scala | 5 +- .../connectors/google/auth/GoogleOAuth2.scala | 5 +- .../google/auth/GoogleOAuth2Credentials.scala | 2 +- .../google/auth/GoogleOAuth2Exception.scala | 3 +- .../auth/ServiceAccountCredentials.scala | 2 +- .../google/auth/UserAccessCredentials.scala | 2 +- .../google/auth/UserAccessMetadata.scala | 3 +- .../connectors/google/http/GoogleHttp.scala | 6 +- .../google/PaginatedRequestSpec.scala | 2 +- .../google/ResumableUploadSpec.scala | 2 +- .../google/auth/GoogleOAuth2Spec.scala | 8 +- .../google/auth/OAuth2CredentialsSpec.scala | 3 +- .../google/http/GoogleHttpSpec.scala | 4 +- .../google/firebase/fcm/impl/FcmFlows.scala | 4 +- .../firebase/fcm/impl/FcmJsonSupport.scala | 10 ++- .../firebase/fcm/v1/impl/FcmJsonSupport.scala | 4 +- .../scala/docs/scaladsl/FcmExamples.scala | 4 +- .../firebase/fcm/v1/impl/FcmSenderSpec.scala | 4 +- .../hbase/impl/HBaseFlowStage.scala | 2 +- .../hbase/impl/HBaseSourceStage.scala | 4 +- .../scala/docs/scaladsl/HBaseStageSpec.scala | 2 +- .../scala/docs/scaladsl/HdfsWriterSpec.scala | 2 +- .../pushkit/impl/PushKitJsonSupport.scala | 2 +- .../scala/docs/scaladsl/PushKitExamples.scala | 4 +- .../huawei/pushkit/impl/HmsTokenApiSpec.scala | 2 +- .../pushkit/impl/PushKitSenderSpec.scala | 2 +- .../test/scala/docs/scaladsl/FlowSpec.scala | 6 +- .../docs/scaladsl/InfluxDbSourceSpec.scala | 2 +- .../scala/docs/scaladsl/InfluxDbSpec.scala | 4 +- .../ironmq/impl/IronMqPullStage.scala | 4 +- .../stream/connectors/ironmq/IronMqSpec.scala | 2 +- .../JmsBufferedAckConnectorsSpec.scala | 2 +- .../scaladsl/JmsIbmmqConnectorsSpec.scala | 2 +- 
.../docs/scaladsl/JmsTxConnectorsSpec.scala | 2 +- .../pekko/stream/connectors/jms/JmsSpec.scala | 2 +- .../jms/scaladsl/JmsAckConnectorsSpec.scala | 2 +- .../connectors/kinesis/ShardIterator.scala | 2 +- .../kinesis/KinesisSchedulerSourceSpec.scala | 36 ++++----- .../scala/docs/scaladsl/MongoSinkSpec.scala | 9 +-- .../scala/docs/scaladsl/MongoSourceSpec.scala | 4 +- .../scala/docs/scaladsl/OrientDbSpec.scala | 12 +-- .../docs/scaladsl/PravegaReadWriteDocs.scala | 12 +-- .../docs/scaladsl/PravegaSettingsSpec.scala | 4 +- .../scala/docs/scaladsl/Serializers.scala | 6 +- project/Common.scala | 24 ++++-- project/Dependencies.scala | 35 +++++++- .../connectors/reference/Resource.scala | 2 +- .../connectors/s3/impl/HttpRequestsSpec.scala | 79 +++++++++++-------- .../connectors/s3/impl/S3StreamSpec.scala | 4 +- .../connectors/s3/impl/auth/SignerSpec.scala | 2 +- .../scala/docs/scaladsl/DocSnippets.scala | 22 +++--- .../test/scala/docs/scaladsl/SlickSpec.scala | 10 +-- .../docs/scaladsl/SnsPublisherSpec.scala | 2 +- .../test/scala/docs/scaladsl/SolrSpec.scala | 32 ++++---- .../test/scala/docs/scaladsl/SqsAckSpec.scala | 8 +- .../scala/docs/scaladsl/SqsSourceSpec.scala | 2 +- .../scala/docs/scaladsl/EventSourceSpec.scala | 8 +- .../test/scala/docs/scaladsl/UdpSpec.scala | 4 +- .../scaladsl/UnixDomainSocket.scala | 2 +- 109 files changed, 418 insertions(+), 350 deletions(-) diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala index ffe30b6dd..e012e5e1d 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala @@ -28,7 +28,7 @@ import scala.concurrent.{ Future, Promise } * the queue named in the replyTo options of the message instead of from settings declared at construction. 
*/ @InternalApi -private[amqp] final class AmqpReplyToSinkStage(settings: AmqpReplyToSinkSettings) +private[amqp] final class AmqpReplyToSinkStage(replyToSinkSettings: AmqpReplyToSinkSettings) extends GraphStageWithMaterializedValue[SinkShape[WriteMessage], Future[Done]] { stage => val in = Inlet[WriteMessage]("AmqpReplyToSink.in") @@ -41,7 +41,7 @@ private[amqp] final class AmqpReplyToSinkStage(settings: AmqpReplyToSinkSettings override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = { val streamCompletion = Promise[Done]() (new GraphStageLogic(shape) with AmqpConnectorLogic { - override val settings = stage.settings + override val settings: AmqpReplyToSinkSettings = stage.replyToSinkSettings override def whenConnected(): Unit = pull(in) diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala index 621d97b7d..289ff27eb 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala @@ -37,7 +37,8 @@ import scala.util.Success * can be overridden per message by including `expectedReplies` in the the header of the [[pekko.stream.connectors.amqp.WriteMessage]] */ @InternalApi -private[amqp] final class AmqpRpcFlowStage(settings: AmqpWriteSettings, bufferSize: Int, responsesPerMessage: Int = 1) +private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, bufferSize: Int, + responsesPerMessage: Int = 1) extends GraphStageWithMaterializedValue[FlowShape[WriteMessage, CommittableReadResult], Future[String]] { stage => @@ -53,7 +54,7 @@ private[amqp] final class AmqpRpcFlowStage(settings: AmqpWriteSettings, bufferSi val streamCompletion = Promise[String]() (new GraphStageLogic(shape) with AmqpConnectorLogic { - override val settings = stage.settings + override val settings: AmqpWriteSettings = stage.writeSettings private val exchange = settings.exchange.getOrElse("") private val routingKey = settings.routingKey.getOrElse("") private val queue = mutable.Queue[CommittableReadResult]() diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala index 69b772d3a..5d24b4447 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala @@ -32,7 +32,7 @@ import scala.concurrent.{ Future, Promise } * instead of complete [[WriteResult]] (possibly it would be less confusing for users), but [[WriteResult]] is used * for consistency with other variants and to make the flow ready for any possible future [[WriteResult]] extensions. 
*/ -@InternalApi private[amqp] final class AmqpSimpleFlowStage[T](settings: AmqpWriteSettings) +@InternalApi private[amqp] final class AmqpSimpleFlowStage[T](writeSettings: AmqpWriteSettings) extends GraphStageWithMaterializedValue[FlowShape[(WriteMessage, T), (WriteResult, T)], Future[Done]] { stage => private val in: Inlet[(WriteMessage, T)] = Inlet(Logging.simpleName(this) + ".in") @@ -45,7 +45,7 @@ import scala.concurrent.{ Future, Promise } override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = { val streamCompletion = Promise[Done]() - (new AbstractAmqpFlowStageLogic[T](settings, streamCompletion, shape) { + (new AbstractAmqpFlowStageLogic[T](writeSettings, streamCompletion, shape) { override def publish(message: WriteMessage, passThrough: T): Unit = { log.debug("Publishing message {}.", message) diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala index 5e2fa8d16..9cca3ac32 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala @@ -20,8 +20,10 @@ import pekko.stream.connectors.amqp.impl import pekko.stream.connectors.amqp.{ AmqpSourceSettings, ReadResult } import pekko.stream.scaladsl.Source +import scala.concurrent.ExecutionContext + object AmqpSource { - private implicit val executionContext = ExecutionContexts.parasitic + private implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic /** * Scala API: Convenience for "at-most once delivery" semantics. Each message is acked to RabbitMQ diff --git a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala index 6eabe9944..dab24b6a3 100644 --- a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala +++ b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala @@ -33,7 +33,7 @@ import scala.collection.immutable */ class AmqpDocsSpec extends AmqpSpec { - override implicit val patienceConfig = PatienceConfig(10.seconds) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds) val businessLogic: CommittableReadResult => Future[CommittableReadResult] = Future.successful(_) diff --git a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala index e472ef917..f51e5a3b2 100644 --- a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala +++ b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala @@ -22,10 +22,12 @@ import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec +import scala.concurrent.ExecutionContext + abstract class AmqpSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with ScalaFutures with LogCapturing { - implicit val system = ActorSystem(this.getClass.getSimpleName) - implicit val executionContext = ExecutionContexts.parasitic + implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName) + implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic override protected def afterAll(): Unit = system.terminate() diff --git a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala 
b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala index 3240c4b5d..23be429ca 100644 --- a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala +++ b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala @@ -15,7 +15,6 @@ package org.apache.pekko.stream.connectors.amqp.scaladsl import java.util.concurrent.ExecutorService import java.util.concurrent.atomic.AtomicInteger - import org.apache.pekko import pekko.actor.ActorSystem import pekko.dispatch.ExecutionContexts @@ -33,7 +32,7 @@ import com.rabbitmq.client.{ AddressResolver, Connection, ConnectionFactory, Shu import org.scalatest.concurrent.ScalaFutures import org.scalatest.BeforeAndAfterEach -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } import scala.concurrent.duration._ import scala.util.control.NonFatal import org.scalatest.matchers.should.Matchers @@ -50,8 +49,8 @@ class AmqpGraphStageLogicConnectionShutdownSpec with BeforeAndAfterEach with LogCapturing { - override implicit val patienceConfig = PatienceConfig(10.seconds) - private implicit val executionContext = ExecutionContexts.parasitic + override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds) + private implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic val shutdownsAdded = new AtomicInteger() val shutdownsRemoved = new AtomicInteger() @@ -76,7 +75,7 @@ class AmqpGraphStageLogicConnectionShutdownSpec "registers and unregisters a single connection shutdown hook per graph" in { // actor system is within this test as it has to be shut down in order // to verify graph stage termination - implicit val system = ActorSystem(this.getClass.getSimpleName + System.currentTimeMillis()) + implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName + System.currentTimeMillis()) val connectionFactory = new ConnectionFactory() { override def newConnection(es: ExecutorService, ar: AddressResolver, name: String): Connection = diff --git a/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala b/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala index 38e70da6c..517831f51 100644 --- a/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala +++ b/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala @@ -28,7 +28,7 @@ import scala.concurrent.duration._ class EventBridgePublisherSpec extends AnyFlatSpec with Matchers with ScalaFutures with IntegrationTestContext { - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 15.seconds, interval = 100.millis) "EventBridge Publisher sink" should "send PutEventsEntry message" in { diff --git a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala index 1dd83c99a..4cad3b954 100644 --- a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala +++ b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala @@ -14,7 +14,6 @@ package docs.scaladsl import java.util.concurrent.CompletableFuture - import org.apache.pekko import pekko.actor.ActorSystem import pekko.stream.connectors.awslambda.scaladsl.AwsLambdaFlow @@ -36,7 +35,7 @@ import software.amazon.awssdk.core.SdkBytes import software.amazon.awssdk.services.lambda.LambdaAsyncClient import 
software.amazon.awssdk.services.lambda.model.{ InvokeRequest, InvokeResponse } -import scala.concurrent.Await +import scala.concurrent.{ Await, ExecutionContext } import scala.concurrent.duration._ class AwsLambdaFlowSpec @@ -49,9 +48,9 @@ class AwsLambdaFlowSpec with MockitoSugar with LogCapturing { - implicit val ec = system.dispatcher + implicit val ec: ExecutionContext = system.dispatcher - implicit val awsLambdaClient = mock[LambdaAsyncClient] + implicit val awsLambdaClient: LambdaAsyncClient = mock[LambdaAsyncClient] override protected def afterEach(): Unit = { reset(awsLambdaClient) diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala index ecca1b8b3..ee5d7d9ee 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala @@ -51,7 +51,7 @@ object AzureQueueWithTimeoutsSink { * of a [[com.microsoft.azure.storage.queue.CloudQueueMessage]] a [[MessageWithTimeouts]]. */ def create(cloudQueue: Supplier[CloudQueue]): Sink[MessageWithTimeouts, CompletionStage[Done]] = - AzureQueueSink.fromFunction { input: MessageWithTimeouts => + AzureQueueSink.fromFunction[MessageWithTimeouts] { input => AzureQueueSinkFunctions .addMessage(() => cloudQueue.get)(input.message, input.timeToLive, input.initialVisibility) } diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala index 1421620ea..a805f5e4f 100644 --- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala +++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala @@ -83,7 +83,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl. onClose: java.lang.Runnable) = this(system.classicSystem, sessionProvider, executionContext, log, metricsCategory, init, onClose) - implicit private val ec = delegate.ec + implicit private val ec: ExecutionContext = delegate.ec /** * Closes the underlying Cassandra session. 
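(Not part of the patch — a condensed sketch of the three Scala 3 cross-building patterns this commit applies repeatedly: explicit result types on implicit definitions, parenthesised lambda parameters when the parameter carries a type ascription, and passing the companion's `apply` to spray-json's `jsonFormatN`, since case-class companions no longer extend the corresponding function type in Scala 3. The `Book` class mirrors the one used in the Elasticsearch specs later in this commit; the other names are illustrative.)

    import org.apache.pekko.actor.ActorSystem
    import scala.concurrent.ExecutionContext
    import spray.json._
    import spray.json.DefaultJsonProtocol._

    case class Book(title: String, shouldSkip: Option[Boolean] = None, price: Int = 10)

    object Scala3Patterns {
      // Scala 2 tolerated `implicit val system = ActorSystem(...)`; leaving the result type
      // to inference is discouraged when cross-building for Scala 3, so it is spelled out.
      implicit val system: ActorSystem = ActorSystem("example")
      implicit val ec: ExecutionContext = system.dispatcher

      // A lambda parameter with a type ascription must be parenthesised under Scala 3.
      val titles = List("id-1" -> Book("Pekko in Action")).map { (entry: (String, Book)) => entry._2.title }

      // The companion object can no longer be passed where a function is expected, hence Book.apply.
      implicit val bookFormat: JsonFormat[Book] = jsonFormat3(Book.apply)
    }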
diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala index d2113e753..01d67fd96 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala @@ -15,7 +15,6 @@ package org.apache.pekko.stream.connectors.cassandra.scaladsl import java.util.concurrent.CompletionStage import java.util.concurrent.atomic.AtomicInteger - import org.apache.pekko import pekko.Done import pekko.testkit.TestKitBase @@ -27,7 +26,7 @@ import org.scalatest.concurrent.{ PatienceConfiguration, ScalaFutures } import scala.collection.immutable import scala.concurrent.duration._ -import scala.concurrent.{ Await, Future } +import scala.concurrent.{ Await, ExecutionContext, Future } import scala.util.control.NonFatal trait CassandraLifecycleBase { @@ -71,7 +70,7 @@ trait CassandraLifecycleBase { executeCql(lifecycleSession, statements.asScala.toList).asJava def withSchemaMetadataDisabled(block: => Future[Done]): Future[Done] = { - implicit val ec = lifecycleSession.ec + implicit val ec: ExecutionContext = lifecycleSession.ec lifecycleSession.underlying().flatMap { cqlSession => cqlSession.setSchemaMetadataEnabled(false) val blockResult = diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala index ebc0f08a8..6a35d171a 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala @@ -29,6 +29,8 @@ import pekko.stream.connectors.couchbase.{ import pekko.stream.scaladsl.Flow import com.couchbase.client.java.document.{ Document, JsonDocument } +import scala.concurrent.ExecutionContext + /** * Scala API: Factory methods for Couchbase flows. 
*/ @@ -104,7 +106,7 @@ object CouchbaseFlow { val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName) Flow[T] .mapAsync(writeSettings.parallelism)(doc => { - implicit val executor = materializer.system.dispatcher + implicit val executor: ExecutionContext = materializer.system.dispatcher session .flatMap(_.upsertDoc(doc, writeSettings)) .map(_ => CouchbaseWriteSuccess(doc)) @@ -157,7 +159,7 @@ object CouchbaseFlow { val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName) Flow[T] .mapAsync(writeSettings.parallelism)(doc => { - implicit val executor = materializer.system.dispatcher + implicit val executor: ExecutionContext = materializer.system.dispatcher session .flatMap(_.replaceDoc(doc, writeSettings)) .map(_ => CouchbaseWriteSuccess(doc)) @@ -179,7 +181,7 @@ object CouchbaseFlow { val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName) Flow[String] .mapAsync(writeSettings.parallelism)(id => { - implicit val executor = materializer.system.dispatcher + implicit val executor: ExecutionContext = materializer.system.dispatcher session .flatMap(_.remove(id, writeSettings)) .map(_ => id) @@ -198,7 +200,7 @@ object CouchbaseFlow { val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName) Flow[String] .mapAsync(writeSettings.parallelism)(id => { - implicit val executor = materializer.system.dispatcher + implicit val executor: ExecutionContext = materializer.system.dispatcher session .flatMap(_.remove(id, writeSettings)) .map(_ => CouchbaseDeleteSuccess(id)) diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala index 57d2ca805..19a323589 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala @@ -25,7 +25,7 @@ import pekko.util.FutureConverters._ import com.typesafe.config.Config import scala.collection.immutable -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } import scala.concurrent.duration.FiniteDuration /** @@ -39,7 +39,7 @@ sealed class DiscoverySupport private { private def readNodes( serviceName: String, lookupTimeout: FiniteDuration)(implicit system: ClassicActorSystemProvider): Future[immutable.Seq[String]] = { - implicit val ec = system.classicSystem.dispatcher + implicit val ec: ExecutionContext = system.classicSystem.dispatcher val discovery = Discovery(system).discovery discovery.lookup(serviceName, lookupTimeout).map { resolved => resolved.addresses.map(_.host) @@ -63,7 +63,7 @@ sealed class DiscoverySupport private { def nodes( config: Config)( implicit system: ClassicActorSystemProvider): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] = { - implicit val ec = system.classicSystem.dispatcher + implicit val ec: ExecutionContext = system.classicSystem.dispatcher settings => readNodes(config) .map { nodes => diff --git a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala index b661296ee..25bcc4fb9 100644 --- a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala +++ 
b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala @@ -5,7 +5,6 @@ package org.apache.pekko.stream.connectors.csv.scaladsl import java.util.concurrent.TimeUnit - import org.apache.pekko import pekko.NotUsed import pekko.actor.ActorSystem @@ -15,7 +14,7 @@ import pekko.util.ByteString import org.openjdk.jmh.annotations._ import org.openjdk.jmh.infra.Blackhole -import scala.concurrent.Await +import scala.concurrent.{ Await, ExecutionContext } import scala.concurrent.duration.Duration /** @@ -50,9 +49,9 @@ import scala.concurrent.duration.Duration @State(Scope.Benchmark) class CsvBench { - implicit val system = ActorSystem() - implicit val executionContext = system.dispatcher - implicit val mat = ActorMaterializer() + implicit val system: ActorSystem = ActorSystem() + implicit val executionContext: ExecutionContext = system.dispatcher + implicit val mat: ActorMaterializer = ActorMaterializer() /** * Size of [[ByteString]] chunks in bytes. diff --git a/csv/src/test/scala/docs/scaladsl/CsvSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvSpec.scala index 276d543ae..41daa7816 100644 --- a/csv/src/test/scala/docs/scaladsl/CsvSpec.scala +++ b/csv/src/test/scala/docs/scaladsl/CsvSpec.scala @@ -30,7 +30,7 @@ abstract class CsvSpec with ScalaFutures with LogCapturing { - implicit val system = ActorSystem(this.getClass.getSimpleName) + implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName) override protected def afterAll(): Unit = TestKit.shutdownActorSystem(system) diff --git a/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala b/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala index 50e805258..50bdd127b 100644 --- a/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala +++ b/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala @@ -29,7 +29,7 @@ import org.scalatest.wordspec.AnyWordSpec class PassThroughExamples extends AnyWordSpec with BeforeAndAfterAll with Matchers with ScalaFutures { - implicit val system = ActorSystem("Test") + implicit val system: ActorSystem = ActorSystem("Test") "PassThroughFlow" should { " original message is maintained " in { @@ -103,7 +103,7 @@ object PassThroughFlow { //#PassThrough object PassThroughFlowKafkaCommitExample { - implicit val system = ActorSystem("Test") + implicit val system: ActorSystem = ActorSystem("Test") def dummy(): Unit = { // #passThroughKafkaFlow diff --git a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala index a948391be..475fc019c 100644 --- a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala +++ b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala @@ -17,15 +17,14 @@ import java.net.URI import org.apache.pekko import pekko.NotUsed +import pekko.actor.ActorSystem import pekko.stream.connectors.testkit.scaladsl.LogCapturing import pekko.stream.scaladsl.{ FlowWithContext, SourceWithContext } import scala.util.{ Failure, Success, Try } //#init-client -import org.apache.pekko.actor.ActorSystem //#init-client -import org.apache.pekko import pekko.stream.connectors.dynamodb.DynamoDbOp._ import pekko.stream.connectors.dynamodb.scaladsl._ import pekko.stream.scaladsl.{ Sink, Source } diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala 
b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala index 7ec576a4f..33b8c8032 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala @@ -364,7 +364,7 @@ trait ElasticsearchConnectorBehaviour { val indexName = "sink7" val createBooks = Source(books) - .map { book: (String, Book) => + .map { (book: (String, Book)) => WriteMessage.createUpsertMessage(id = book._1, source = book._2) } .via( @@ -391,7 +391,7 @@ trait ElasticsearchConnectorBehaviour { // Update sink7/_doc with the second dataset val upserts = Source(updatedBooks) - .map { book: (String, JsObject) => + .map { (book: (String, JsObject)) => WriteMessage.createUpsertMessage(id = book._1, source = book._2) } .via( @@ -453,7 +453,7 @@ trait ElasticsearchConnectorBehaviour { "read and write document-version if configured to do so" in { case class VersionTestDoc(id: String, name: String, value: Int) - implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc) + implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc.apply) val indexName = "version-test-scala" val typeName = "_doc" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala index 4236234bc..2c67de274 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala @@ -45,7 +45,7 @@ trait ElasticsearchSpecUtils { this: AnyWordSpec with ScalaFutures => case class Book(title: String, shouldSkip: Option[Boolean] = None, price: Int = 10) - implicit val format: JsonFormat[Book] = jsonFormat3(Book) + implicit val format: JsonFormat[Book] = jsonFormat3(Book.apply) // #define-class def register(connectionSettings: ElasticsearchConnectionSettings, diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala index 3c9fd4132..52b27dc11 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala @@ -64,7 +64,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V5), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[spray.json.JsObject] => + .map { (message: ReadResult[spray.json.JsObject]) => val book: Book = jsonReader[Book].read(message.source) WriteMessage.createIndexMessage(message.id, book) } @@ -97,7 +97,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V5), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .runWith( @@ -129,7 +129,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V5), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .via( @@ -200,7 +200,7 @@ 
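The spray-json changes in these spec files all follow one pattern: `jsonFormatN(SomeCaseClass)` becomes `jsonFormatN(SomeCaseClass.apply)` and the resulting format gets an explicit type annotation. A small self-contained sketch using a `Book` case class modelled on the test fixtures (names here are illustrative); referencing `.apply` avoids relying on the companion object being usable directly as a function value, which keeps the call site valid on both Scala 2.13 and Scala 3:

    import spray.json._
    import DefaultJsonProtocol._

    final case class Book(title: String, shouldSkip: Option[Boolean] = None, price: Int = 10)

    object BookJsonProtocol {
      // jsonFormat3 expects a (String, Option[Boolean], Int) => Book function;
      // Book.apply eta-expands to exactly that, without depending on the
      // companion object extending Function3.
      implicit val bookFormat: JsonFormat[Book] = jsonFormat3(Book.apply)
    }

    // Usage: Book("Pekko in Action").toJson / someJsValue.convertTo[Book]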
class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -251,7 +251,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6-bulk" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -306,7 +306,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6-nop" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -363,7 +363,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt register(connectionSettings, indexName, "dummy", 10) // need to create index else exception in reading below val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -447,7 +447,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V5), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage .createIndexMessage(message.id, message.source) .withIndexName(customIndexName) // Setting the index-name to use for this document @@ -480,7 +480,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt case class TestDoc(id: String, a: String, b: Option[String], c: String) // #custom-search-params - implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc) + implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) val indexName = "custom-search-params-test-scala" val typeName = "_doc" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala index 28f064cc8..1d2522454 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala @@ -56,7 +56,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V7), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[spray.json.JsObject] => + .map { (message: ReadResult[spray.json.JsObject]) => val book: Book = jsonReader[Book].read(message.source) WriteMessage.createIndexMessage(message.id, book) } @@ -88,7 +88,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V7), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .runWith( @@ -119,7 +119,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt 
constructElasticsearchParams("source", "_doc", ApiVersion.V7), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .via( @@ -187,7 +187,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -237,7 +237,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6-bulk" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -291,7 +291,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6-nop" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -347,7 +347,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt register(connectionSettings, indexName, "dummy", 10) // need to create index else exception in reading below val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -428,7 +428,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V7), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage .createIndexMessage(message.id, message.source) .withIndexName(customIndexName) // Setting the index-name to use for this document @@ -458,7 +458,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt case class TestDoc(id: String, a: String, b: Option[String], c: String) - implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc) + implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) val indexName = "custom-search-params-test-scala" val typeName = "_doc" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala index 72ca863c0..77c513ee1 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala @@ -364,7 +364,7 @@ trait OpensearchConnectorBehaviour { val indexName = "sink7" val createBooks = Source(books) - .map { book: (String, Book) => + .map { (book: (String, Book)) => WriteMessage.createUpsertMessage(id = book._1, source = book._2) } .via( @@ -391,7 +391,7 @@ trait OpensearchConnectorBehaviour { // Update sink7/_doc with the second dataset val upserts = Source(updatedBooks) - .map { book: (String, JsObject) => + .map { (book: (String, JsObject)) => WriteMessage.createUpsertMessage(id = book._1, source = book._2) } .via( @@ -453,7 +453,7 @@ trait OpensearchConnectorBehaviour { "read and write document-version if 
configured to do so" in { case class VersionTestDoc(id: String, name: String, value: Int) - implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc) + implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc.apply) val indexName = "version-test-scala" val typeName = "_doc" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala index b202588e0..3ba59ee55 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala @@ -65,7 +65,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[spray.json.JsObject] => + .map { (message: ReadResult[spray.json.JsObject]) => val book: Book = jsonReader[Book].read(message.source) WriteMessage.createIndexMessage(message.id, book) } @@ -100,7 +100,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .runWith( @@ -133,7 +133,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .via( @@ -205,7 +205,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils val indexName = "sink6" val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -255,7 +255,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils val indexName = "sink6-bulk" val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -309,7 +309,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils val indexName = "sink6-nop" val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -365,7 +365,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils register(connectionSettings, indexName, "dummy", 10) // need to create index else exception in reading below val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -449,7 +449,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { 
message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage .createIndexMessage(message.id, message.source) .withIndexName(customIndexName) // Setting the index-name to use for this document @@ -480,7 +480,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils case class TestDoc(id: String, a: String, b: Option[String], c: String) - implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc) + implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) val indexName = "custom-search-params-test-scala" val typeName = "_doc" diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala index 84465b0f2..75f0ffe74 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala @@ -16,12 +16,10 @@ package org.apache.pekko.stream.connectors.file.javadsl import java.nio.file.{ Path, StandardOpenOption } import java.util.Optional import java.util.concurrent.CompletionStage - import org.apache.pekko import pekko.Done import pekko.stream.javadsl import pekko.stream.scaladsl -import pekko.stream.javadsl.Sink import pekko.util.ByteString import pekko.japi.function import pekko.util.ccompat.JavaConverters._ @@ -42,11 +40,11 @@ object LogRotatorSink { */ def createFromFunction( triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]]) - : javadsl.Sink[ByteString, CompletionStage[Done]] = - new Sink( - pekko.stream.connectors.file.scaladsl - .LogRotatorSink(asScala(triggerGeneratorCreator)) - .toCompletionStage()) + : javadsl.Sink[ByteString, CompletionStage[Done]] = { + val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl + .LogRotatorSink(asScala(triggerGeneratorCreator))) + new javadsl.Sink(logRotatorSink.toCompletionStage()) + } /** * Sink directing the incoming `ByteString`s to new files whenever `triggerGenerator` returns a value. @@ -56,11 +54,12 @@ object LogRotatorSink { */ def createFromFunctionAndOptions( triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]], - fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] = - new Sink( - pekko.stream.connectors.file.scaladsl - .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet) - .toCompletionStage()) + fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] = { + + val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl + .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet)) + new javadsl.Sink(logRotatorSink.toCompletionStage()) + } /** * Sink directing the incoming `ByteString`s to a new `Sink` created by `sinkFactory` whenever `triggerGenerator` returns a value. 
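Another recurring change in the Elasticsearch and Opensearch specs is purely syntactic: `.map { message: ReadResult[Book] => ... }` becomes `.map { (message: ReadResult[Book]) => ... }`. Scala 3 only accepts a type-annotated lambda parameter when it is parenthesized, and the parenthesized form also compiles on Scala 2.13, so it is the cross-building spelling. A tiny illustration with a plain collection standing in for the stream (the `Item` type is made up for the example):

    object TypedLambdaParamSketch {
      final case class Item(id: String, value: Int)

      val items = List(Item("a", 1), Item("b", 2))

      // Scala 2-only syntax, rejected by the Scala 3 parser:
      //   items.map { item: Item => item.value }

      // Cross-compiling form: parentheses around the annotated parameter.
      val values: List[Int] = items.map { (item: Item) => item.value }
    }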
@@ -72,14 +71,14 @@ object LogRotatorSink { */ def withSinkFactory[C, R]( triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[C]]], - sinkFactory: function.Function[C, Sink[ByteString, CompletionStage[R]]]) + sinkFactory: function.Function[C, javadsl.Sink[ByteString, CompletionStage[R]]]) : javadsl.Sink[ByteString, CompletionStage[Done]] = { val t: C => scaladsl.Sink[ByteString, Future[R]] = path => sinkFactory.apply(path).asScala.mapMaterializedValue(_.asScala) - new Sink( - pekko.stream.connectors.file.scaladsl.LogRotatorSink - .withSinkFactory(asScala[C](triggerGeneratorCreator), t) - .toCompletionStage()) + val logRotatorSink = + new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl.LogRotatorSink + .withSinkFactory(asScala[C](triggerGeneratorCreator), t)) + new javadsl.Sink(logRotatorSink.toCompletionStage()) } private def asScala[C]( diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala index 7550936db..fcb19d0b0 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala @@ -167,7 +167,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => override def postStop(): Unit = promise.completeWith { - implicit val ec = materializer.executionContext + implicit val ec: ExecutionContext = materializer.executionContext Future .sequence(sinkCompletions) .map(_ => Done)(pekko.dispatch.ExecutionContexts.parasitic) diff --git a/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala b/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala index fb2e3570b..24f744e83 100644 --- a/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala +++ b/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala @@ -95,7 +95,7 @@ class LogRotatorSinkSpec "complete when consuming an empty source" in assertAllStagesStopped { val triggerCreator: () => ByteString => Option[Path] = () => { - element: ByteString => fail("trigger creator should not be called") + (element: ByteString) => fail("trigger creator should not be called") } val rotatorSink: Sink[ByteString, Future[Done]] = @@ -112,7 +112,7 @@ class LogRotatorSinkSpec val fileSizeTriggerCreator: () => ByteString => Option[Path] = () => { val max = 10 * 1024 * 1024 var size: Long = max - element: ByteString => + (element: ByteString) => if (size + element.size > max) { val path = Files.createTempFile("out-", ".log") size = element.size diff --git a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala index 1859f83a1..177503d8c 100644 --- a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala +++ b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala @@ -20,8 +20,9 @@ import java.nio.file.{ Files, Paths } import java.time.Instant import java.util.concurrent.TimeUnit import org.apache.pekko -import pekko.stream.{ IOOperationIncompleteException, IOResult } +import pekko.stream.{ IOOperationIncompleteException, IOResult, Materializer } import BaseSftpSupport.{ CLIENT_PRIVATE_KEY_PASSPHRASE => ClientPrivateKeyPassphrase } +import pekko.actor.ActorSystem import pekko.stream.scaladsl.{ Keep, Sink, Source } import 
pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped import pekko.stream.testkit.scaladsl.TestSink @@ -30,7 +31,7 @@ import org.scalatest.concurrent.Eventually import org.scalatest.time.{ Millis, Seconds, Span } import scala.collection.immutable -import scala.concurrent.{ Await, ExecutionContextExecutor } +import scala.concurrent.{ Await, ExecutionContext, ExecutionContextExecutor } import scala.concurrent.duration._ import scala.util.Random @@ -85,14 +86,14 @@ final class UnconfirmedReadsSftpSourceSpec extends BaseSftpSpec with CommonFtpSt trait CommonFtpStageSpec extends BaseSpec with Eventually { - implicit val system = getSystem - implicit val mat = getMaterializer - implicit val defaultPatience = + implicit val system: ActorSystem = getSystem + implicit val mat: Materializer = getMaterializer + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(30, Seconds), interval = Span(600, Millis)) "FtpBrowserSource" should { "complete with a failed Future, when the credentials supplied were wrong" in assertAllStagesStopped { - implicit val ec = system.getDispatcher + implicit val ec: ExecutionContext = system.getDispatcher listFilesWithWrongCredentials("") .toMat(Sink.seq)(Keep.right) .run() diff --git a/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala b/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala index 48532d0be..46dca6113 100644 --- a/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala +++ b/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala @@ -30,7 +30,7 @@ import org.scalatest.wordspec.AnyWordSpec class GeodeBaseSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with LogCapturing { - implicit val system = ActorSystem("test") + implicit val system: ActorSystem = ActorSystem("test") // #region val personsRegionSettings: RegionSettings[Int, Person] = RegionSettings("persons", (p: Person) => p.id) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala index 8a9a0d51e..426f1dd8d 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala @@ -77,5 +77,5 @@ object BigQueryException { } private final case class ErrorResponse(error: Option[ErrorProto]) - private implicit val errorResponseFormat: RootJsonFormat[ErrorResponse] = jsonFormat1(ErrorResponse) + private implicit val errorResponseFormat: RootJsonFormat[ErrorResponse] = jsonFormat1(ErrorResponse.apply) } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala index 08998f071..fa0451534 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala @@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation._ import spray.json.{ JsonFormat, RootJsonFormat, RootJsonReader, RootJsonWriter } import java.{ lang, util } - import scala.annotation.nowarn import 
scala.annotation.unchecked.uncheckedVariance import scala.collection.immutable.Seq @@ -166,8 +165,8 @@ object TableDataInsertAllRequest { implicit def writer[T]( implicit writer: BigQueryRootJsonWriter[T]): RootJsonWriter[TableDataInsertAllRequest[T]] = { - implicit val format = lift(writer) - implicit val rowFormat = jsonFormat2(Row[T]) + implicit val format: RootJsonFormat[T] = lift(writer) + implicit val rowFormat: RootJsonFormat[Row[T]] = jsonFormat2(Row[T]) jsonFormat4(TableDataInsertAllRequest[T]) } } diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala index feea44fab..b130f5f3e 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala @@ -28,7 +28,7 @@ import pekko.stream.connectors.googlecloud.bigquery.{ BigQueryEndpoints, BigQuer import pekko.stream.scaladsl.Source import pekko.{ Done, NotUsed } -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } private[scaladsl] trait BigQueryDatasets { this: BigQueryRest => @@ -102,7 +102,7 @@ private[scaladsl] trait BigQueryDatasets { this: BigQueryRest => settings: GoogleSettings): Future[Dataset] = { import BigQueryException._ import SprayJsonSupport._ - implicit val ec = ExecutionContexts.parasitic + implicit val ec: ExecutionContext = ExecutionContexts.parasitic val uri = BigQueryEndpoints.datasets(settings.projectId) Marshal(dataset).to[RequestEntity].flatMap { entity => val request = HttpRequest(POST, uri, entity = entity) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala index 6ae985019..166e1037f 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala @@ -43,7 +43,7 @@ import pekko.stream.scaladsl.{ Flow, GraphDSL, Keep, Sink } import pekko.util.ByteString import scala.annotation.nowarn -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } private[scaladsl] trait BigQueryJobs { this: BigQueryRest => @@ -114,7 +114,7 @@ private[scaladsl] trait BigQueryJobs { this: BigQueryRest => .fromMaterializer { (mat, attr) => import SprayJsonSupport._ import mat.executionContext - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val BigQuerySettings(loadJobPerTableQuota) = BigQueryAttributes.resolveSettings(mat, attr) val job = Job( @@ -168,8 +168,8 @@ private[scaladsl] trait BigQueryJobs { this: BigQueryRest => Sink .fromMaterializer { (mat, attr) => import BigQueryException._ - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) - implicit val ec = ExecutionContexts.parasitic + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) + implicit val ec: ExecutionContext = ExecutionContexts.parasitic val uri = 
BigQueryMediaEndpoints.jobs(settings.projectId).withQuery(Query("uploadType" -> "resumable")) Sink .lazyFutureSink { () => diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala index a86072b98..be092ef82 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl import org.apache.pekko +import pekko.actor.ActorSystem import pekko.NotUsed import pekko.dispatch.ExecutionContexts import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport @@ -23,14 +24,14 @@ import pekko.http.scaladsl.model.Uri.Query import pekko.http.scaladsl.model.{ HttpRequest, RequestEntity } import pekko.http.scaladsl.unmarshalling.FromEntityUnmarshaller import pekko.stream.RestartSettings -import pekko.stream.connectors.google.GoogleAttributes +import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings } import pekko.stream.connectors.google.implicits._ import pekko.stream.connectors.googlecloud.bigquery.model.JobReference import pekko.stream.connectors.googlecloud.bigquery.model.{ QueryRequest, QueryResponse } import pekko.stream.connectors.googlecloud.bigquery.{ BigQueryEndpoints, BigQueryException } import pekko.stream.scaladsl.{ Keep, RestartSource, Sink, Source } -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } import scala.concurrent.duration.FiniteDuration import scala.util.{ Failure, Success } @@ -70,9 +71,9 @@ private[scaladsl] trait BigQueryQueries { this: BigQueryRest => .fromMaterializer { (mat, attr) => import BigQueryException._ import SprayJsonSupport._ - implicit val system = mat.system - implicit val ec = ExecutionContexts.parasitic - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val system: ActorSystem = mat.system + implicit val ec: ExecutionContext = ExecutionContexts.parasitic + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) Source.lazyFutureSource { () => for { diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala index 676e400b7..29b9cb4f8 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl import org.apache.pekko +import pekko.actor.ActorSystem import pekko.NotUsed import pekko.dispatch.ExecutionContexts import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport @@ -22,7 +23,7 @@ import pekko.http.scaladsl.model.HttpMethods.POST import pekko.http.scaladsl.model.Uri.Query import pekko.http.scaladsl.model.{ HttpRequest, RequestEntity } import pekko.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, FromResponseUnmarshaller } -import 
pekko.stream.connectors.google.GoogleAttributes +import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings } import pekko.stream.connectors.google.http.GoogleHttp import pekko.stream.connectors.google.implicits._ import pekko.stream.connectors.googlecloud.bigquery.model.{ @@ -131,9 +132,9 @@ private[scaladsl] trait BigQueryTableData { this: BigQueryRest => .fromMaterializer { (mat, attr) => import BigQueryException._ import SprayJsonSupport._ - implicit val system = mat.system + implicit val system: ActorSystem = mat.system implicit val ec = ExecutionContexts.parasitic - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val uri = BigQueryEndpoints.tableDataInsertAll(settings.projectId, datasetId, tableId) val request = HttpRequest(POST, uri) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala index ccde83332..2938d077c 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala @@ -29,7 +29,7 @@ import pekko.stream.connectors.googlecloud.bigquery.scaladsl.schema.TableSchemaW import pekko.stream.connectors.googlecloud.bigquery.{ BigQueryEndpoints, BigQueryException } import pekko.stream.scaladsl.{ Keep, Sink, Source } -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } private[scaladsl] trait BigQueryTables { this: BigQueryRest => @@ -94,7 +94,7 @@ private[scaladsl] trait BigQueryTables { this: BigQueryRest => settings: GoogleSettings): Future[Table] = { import BigQueryException._ import SprayJsonSupport._ - implicit val ec = ExecutionContexts.parasitic + implicit val ec: ExecutionContext = ExecutionContexts.parasitic val projectId = table.tableReference.projectId.getOrElse(settings.projectId) val datasetId = table.tableReference.datasetId val uri = BigQueryEndpoints.tables(projectId, datasetId) diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala index 2c1c549b3..7051651d9 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala @@ -50,7 +50,7 @@ class BigQueryQueriesSpec jsonFormat10(QueryResponse[T]) } - implicit val settings = GoogleSettings().copy(credentials = NoCredentials("", "")) + implicit val settings: GoogleSettings = GoogleSettings().copy(credentials = NoCredentials("", "")) val jobId = "jobId" val pageToken = "pageToken" diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala index ea402dbd5..38eba06e4 100644 --- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala +++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala @@ -50,7 
+50,7 @@ class IntegrationSpec with OptionValues with LogCapturing { - implicit val system = ActorSystem("IntegrationSpec") + implicit val system: ActorSystem = ActorSystem("IntegrationSpec") implicit val defaultPatience = PatienceConfig(timeout = 15.seconds, interval = 50.millis) diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala index 7ec28d3e9..696406f89 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala @@ -23,7 +23,7 @@ import pekko.http.scaladsl.marshalling.Marshal import pekko.http.scaladsl.model.HttpMethods.POST import pekko.http.scaladsl.model._ import pekko.http.scaladsl.unmarshalling.{ FromResponseUnmarshaller, Unmarshal, Unmarshaller } -import pekko.stream.connectors.google.GoogleAttributes +import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings, RequestSettings } import pekko.stream.connectors.google.http.GoogleHttp import pekko.stream.connectors.google.implicits._ import pekko.stream.connectors.googlecloud.pubsub._ @@ -142,7 +142,7 @@ private[pubsub] trait PubSubApi { AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) } - private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest) + private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest.apply) private def scheme: String = if (isEmulated) "http" else "https" @@ -150,8 +150,8 @@ private[pubsub] trait PubSubApi { Flow .fromMaterializer { (mat, attr) => import mat.executionContext - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) - implicit val requestSettings = settings.requestSettings + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) + implicit val requestSettings: RequestSettings = settings.requestSettings val url: Uri = Uri.from( scheme = scheme, @@ -188,8 +188,8 @@ private[pubsub] trait PubSubApi { Flow .fromMaterializer { (mat, attr) => import mat.executionContext - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) - implicit val requestSettings = settings.requestSettings + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) + implicit val requestSettings: RequestSettings = settings.requestSettings val url: Uri = Uri.from( scheme = scheme, @@ -239,8 +239,8 @@ private[pubsub] trait PubSubApi { Flow .fromMaterializer { (mat, attr) => import mat.executionContext - implicit val system = mat.system - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val system: ActorSystem = mat.system + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val url: Uri = s"/v1/projects/${settings.projectId}/topics/$topic:publish" FlowWithContext[PublishRequest, T] .mapAsync(parallelism) { request => diff --git a/google-cloud-pub-sub/src/test/scala/docs/scaladsl/ExampleUsage.scala b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/ExampleUsage.scala index 0a611e470..d3928b8a2 100644 --- a/google-cloud-pub-sub/src/test/scala/docs/scaladsl/ExampleUsage.scala +++ 
b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/ExampleUsage.scala @@ -31,7 +31,7 @@ import scala.concurrent.{ Future, Promise } class ExampleUsage { // #init-system - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() val config = PubSubConfig() val topic = "topic1" val subscription = "subscription1" diff --git a/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala index 3a2cc47e1..f7a1d2fab 100644 --- a/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala @@ -45,7 +45,7 @@ class IntegrationSpec with OptionValues with LogCapturing { - private implicit val system = ActorSystem("IntegrationSpec") + private implicit val system: ActorSystem = ActorSystem("IntegrationSpec") override def afterAll(): Unit = TestKit.shutdownActorSystem(system) diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala index afd6daf47..03c179ca5 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala @@ -42,10 +42,10 @@ class GooglePubSubSpec with LogCapturing with BeforeAndAfterAll { - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 5.seconds, interval = 100.millis) - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() override protected def afterAll(): Unit = { TestKit.shutdownActorSystem(system) diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala index 78dd49392..d5bbb9e7e 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala @@ -53,7 +53,7 @@ class NoopTrustManager extends X509TrustManager { class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures with Matchers with LogCapturing { - implicit val system = ActorSystem( + implicit val system: ActorSystem = ActorSystem( "PubSubApiSpec", ConfigFactory .parseString( diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala index aab63a46c..5c20c3931 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala @@ -26,13 +26,15 @@ import scala.util.Try object Formats extends DefaultJsonProtocol { private final case class CustomerEncryption(encryptionAlgorithm: String, keySha256: String) - private implicit val customerEncryptionJsonFormat = jsonFormat2(CustomerEncryption) + + private implicit val customerEncryptionJsonFormat: 
RootJsonFormat[CustomerEncryption] = + jsonFormat2(CustomerEncryption.apply) private final case class Owner(entity: String, entityId: Option[String]) - private implicit val OwnerJsonFormat = jsonFormat2(Owner) + private implicit val OwnerJsonFormat: RootJsonFormat[Owner] = jsonFormat2(Owner.apply) private final case class ProjectTeam(projectNumber: String, team: String) - private implicit val ProjectTeamJsonFormat = jsonFormat2(ProjectTeam) + private implicit val ProjectTeamJsonFormat: RootJsonFormat[ProjectTeam] = jsonFormat2(ProjectTeam.apply) private final case class ObjectAccessControls(kind: String, id: String, @@ -129,7 +131,7 @@ object Formats extends DefaultJsonProtocol { prefixes: Option[List[String]], items: Option[List[StorageObjectJson]]) - private implicit val bucketInfoJsonFormat = jsonFormat6(BucketInfoJson) + private implicit val bucketInfoJsonFormat: RootJsonFormat[BucketInfoJson] = jsonFormat6(BucketInfoJson.apply) /** * Google API rewrite response object @@ -144,7 +146,8 @@ object Formats extends DefaultJsonProtocol { rewriteToken: Option[String], resource: Option[StorageObjectJson]) - private implicit val rewriteResponseFormat = jsonFormat6(RewriteResponseJson) + private implicit val rewriteResponseFormat: RootJsonFormat[RewriteResponseJson] = + jsonFormat6(RewriteResponseJson.apply) /** * Google API bucket response object @@ -159,7 +162,7 @@ object Formats extends DefaultJsonProtocol { selfLink: String, etag: String) - implicit val bucketInfoFormat = jsonFormat2(BucketInfo) + implicit val bucketInfoFormat: RootJsonFormat[BucketInfo] = jsonFormat2(BucketInfo.apply) implicit object BucketListResultReads extends RootJsonReader[BucketListResult] { override def read(json: JsValue): BucketListResult = { @@ -172,7 +175,7 @@ object Formats extends DefaultJsonProtocol { } } - private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson) + private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson.apply) implicit object RewriteResponseReads extends RootJsonReader[RewriteResponse] { override def read(json: JsValue): RewriteResponse = { diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala index 53319fc93..0ba705a5c 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala @@ -37,7 +37,7 @@ import pekko.{ Done, NotUsed } import spray.json._ import scala.annotation.nowarn -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } @InternalApi private[storage] object GCStorageStream { @@ -55,7 +55,7 @@ import scala.concurrent.Future val uri = Uri(gcsSettings.endpointUrl) .withPath(Path(gcsSettings.basePath) / "b") .withQuery(Query("project" -> settings.projectId)) - implicit val ec = parasitic + implicit val ec: ExecutionContext = parasitic val request = Marshal(BucketInfo(bucketName, location)).to[RequestEntity].map { entity => HttpRequest(POST, uri, entity = entity) } @@ -142,7 +142,7 @@ import scala.concurrent.Future metadata: Option[Map[String, String]] = None): Sink[ByteString, Future[StorageObject]] = Sink .fromMaterializer { (mat, attr) => - implicit val settings = { + implicit val settings: GoogleSettings = { val s = 
resolveSettings(mat, attr) s.copy(requestSettings = s.requestSettings.copy(uploadChunkSize = chunkSize)) } @@ -226,7 +226,7 @@ import scala.concurrent.Future private def makeRequestSource[T: FromResponseUnmarshaller](request: Future[HttpRequest]): Source[T, NotUsed] = Source .fromMaterializer { (mat, attr) => - implicit val settings = resolveSettings(mat, attr) + implicit val settings: GoogleSettings = resolveSettings(mat, attr) Source.lazyFuture { () => request.flatMap { request => GoogleHttp()(mat.system).singleAuthenticatedRequest[T](request) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala index e93147b73..547941298 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala @@ -31,7 +31,7 @@ class GCSExtSpec extends AnyFlatSpec with Matchers with LogCapturing { "pekko.connectors.google.cloud-storage.endpoint-url" -> endpointUrl, "pekko.connectors.google.cloud-storage.base-path" -> basePath).asJava) - implicit val system = ActorSystem.create("gcs", config) + implicit val system: ActorSystem = ActorSystem.create("gcs", config) val ext = GCSExt(system) ext.settings.endpointUrl shouldBe endpointUrl diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala index ee8de4f20..a17a715c9 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala @@ -42,7 +42,7 @@ class GCStorageExtSpec extends AnyFlatSpec with Matchers with LogCapturing { "pekko.connectors.google.cloud.storage.base-path" -> basePath, "pekko.connectors.google.cloud.storage.token-url" -> tokenUrl, "pekko.connectors.google.cloud.storage.token-scope" -> tokenScope).asJava) - implicit val system = ActorSystem.create("gcStorage", config) + implicit val system: ActorSystem = ActorSystem.create("gcStorage", config) @nowarn("msg=deprecated") val ext = GCStorageExt(system) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala index ea391e491..eb046720a 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala @@ -19,7 +19,7 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } -import scala.concurrent.Await +import scala.concurrent.{ Await, ExecutionContext } import scala.concurrent.duration._ trait WithMaterializerGlobal @@ -30,7 +30,7 @@ trait WithMaterializerGlobal with IntegrationPatience with Matchers { implicit val actorSystem = ActorSystem("test") - implicit val ec = actorSystem.dispatcher + implicit val ec: 
ExecutionContext = actorSystem.dispatcher override protected def afterAll(): Unit = { super.afterAll() diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala index 7560af7c9..20fbf7004 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google import org.apache.pekko +import pekko.actor.ActorSystem import pekko.annotation.InternalApi import pekko.dispatch.ExecutionContexts import pekko.http.scaladsl.model.HttpMethods.GET @@ -51,8 +52,8 @@ private[connectors] object PaginatedRequest { Source .fromMaterializer { (mat, attr) => - implicit val system = mat.system - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val system: ActorSystem = mat.system + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val requestWithPageToken = addPageToken(request, query) Source.unfoldAsync[Either[Done, Option[String]], Out](Right(initialPageToken)) { diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index 5da81e3fb..f50c921b9 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google import org.apache.pekko +import pekko.actor.ActorSystem import pekko.NotUsed import pekko.annotation.InternalApi import pekko.http.scaladsl.model.HttpMethods.{ POST, PUT } @@ -56,7 +57,7 @@ private[connectors] object ResumableUpload { .fromMaterializer { (mat, attr) => import mat.executionContext implicit val materializer = mat - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val uploadChunkSize = settings.requestSettings.uploadChunkSize val in = Flow[ByteString] @@ -95,7 +96,7 @@ private[connectors] object ResumableUpload { private def initiateSession(request: HttpRequest)(implicit mat: Materializer, settings: GoogleSettings): Future[Uri] = { - implicit val system = mat.system + implicit val system: ActorSystem = mat.system import implicits._ implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => @@ -111,7 +112,7 @@ private[connectors] object ResumableUpload { private def uploadChunk[T: FromResponseUnmarshaller]( request: HttpRequest)(implicit mat: Materializer): Flow[Either[T, MaybeLast[Chunk]], Try[Option[T]], NotUsed] = { - implicit val system = mat.system + implicit val system: ActorSystem = mat.system val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => response.status match { @@ -146,7 +147,7 @@ private[connectors] object ResumableUpload { request: HttpRequest, chunk: Future[MaybeLast[Chunk]])( implicit mat: Materializer, settings: GoogleSettings): Future[Either[T, MaybeLast[Chunk]]] = { - implicit val system = mat.system + implicit val system: ActorSystem = mat.system import implicits._ implicit val um = Unmarshaller.withMaterializer { implicit 
ec => implicit mat => response: HttpResponse => diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala index da0e06726..b205a11cb 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google.auth import org.apache.pekko +import pekko.actor.ActorSystem import pekko.annotation.InternalApi import pekko.http.scaladsl.Http import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport @@ -42,7 +43,7 @@ private[auth] object GoogleComputeMetadata { clock: Clock): Future[AccessToken] = { import SprayJsonSupport._ import mat.executionContext - implicit val system = mat.system + implicit val system: ActorSystem = mat.system for { response <- Http().singleRequest(tokenRequest) token <- Unmarshal(response.entity).to[AccessToken] @@ -52,7 +53,7 @@ private[auth] object GoogleComputeMetadata { def getProjectId()( implicit mat: Materializer): Future[String] = { import mat.executionContext - implicit val system = mat.system + implicit val system: ActorSystem = mat.system for { response <- Http().singleRequest(projectIdRequest) projectId <- Unmarshal(response.entity).to[String] diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala index 615e7959d..4099f91c1 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google.auth import org.apache.pekko +import pekko.actor.ActorSystem import pekko.annotation.InternalApi import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import pekko.http.scaladsl.model.HttpMethods.POST @@ -42,7 +43,7 @@ private[auth] object GoogleOAuth2 { import GoogleOAuth2Exception._ import SprayJsonSupport._ import implicits._ - implicit val system = mat.system + implicit val system: ActorSystem = mat.system try { val entity = FormData( @@ -71,5 +72,5 @@ private[auth] object GoogleOAuth2 { } final case class JwtClaimContent(scope: String) - implicit val jwtClaimContentFormat: JsonFormat[JwtClaimContent] = jsonFormat1(JwtClaimContent) + implicit val jwtClaimContentFormat: JsonFormat[JwtClaimContent] = jsonFormat1(JwtClaimContent.apply) } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala index 5f2c78b5c..bbd9e848d 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala @@ -38,7 +38,7 @@ private[auth] final class GoogleOAuth2Credentials(credentials: OAuth2Credentials Await.result(requestMetadata, Duration.Inf) override def getRequestMetadata(uri: URI, executor: Executor, callback: RequestMetadataCallback): Unit = { - implicit val ec = ExecutionContext.fromExecutor(executor) + 
implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(executor) requestMetadata.onComplete { case Success(metadata) => callback.onSuccess(metadata) case Failure(ex) => callback.onFailure(ex) diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala index 9c7e373d5..ae9690008 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala @@ -28,7 +28,8 @@ private[google] object GoogleOAuth2Exception { private val internalFailure = "internal_failure" private final case class OAuth2ErrorResponse(error: Option[String], error_description: Option[String]) - private implicit val oAuth2ErrorResponseFormat: RootJsonFormat[OAuth2ErrorResponse] = jsonFormat2(OAuth2ErrorResponse) + private implicit val oAuth2ErrorResponseFormat: RootJsonFormat[OAuth2ErrorResponse] = + jsonFormat2(OAuth2ErrorResponse.apply) implicit val unmarshaller: FromResponseUnmarshaller[Throwable] = Unmarshaller diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala index 2914286e9..369365415 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala @@ -57,7 +57,7 @@ private[connectors] object ServiceAccountCredentials { final case class ServiceAccountCredentialsFile(project_id: String, client_email: String, private_key: String) implicit val serviceAccountCredentialsFormat: RootJsonFormat[ServiceAccountCredentialsFile] = jsonFormat3( - ServiceAccountCredentialsFile) + ServiceAccountCredentialsFile.apply) } @InternalApi diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala index b41549dcb..be1f71ea9 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala @@ -61,7 +61,7 @@ private[connectors] object UserAccessCredentials { refresh_token: String, quota_project_id: String) implicit val userAccessCredentialsFormat: RootJsonFormat[UserAccessCredentialsFile] = jsonFormat4( - UserAccessCredentialsFile) + UserAccessCredentialsFile.apply) } @InternalApi diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala index 56e834d8e..c3efbfcce 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google.auth import org.apache.pekko +import pekko.actor.ActorSystem import pekko.annotation.InternalApi import pekko.http.scaladsl.Http import 
pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport @@ -45,7 +46,7 @@ private[auth] object UserAccessMetadata { clock: Clock): Future[AccessToken] = { import SprayJsonSupport._ import mat.executionContext - implicit val system = mat.system + implicit val system: ActorSystem = mat.system for { response <- Http().singleRequest(tokenRequest(clientId, clientSecret, refreshToken)) token <- Unmarshal(response.entity).to[AccessToken] diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala index f5326563d..d536f8b00 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala @@ -76,7 +76,7 @@ private[connectors] final class GoogleHttp private (val http: HttpExt) extends A def singleAuthenticatedRequest[T](request: HttpRequest)( implicit settings: GoogleSettings, um: FromResponseUnmarshaller[T]): Future[T] = Retry(settings.requestSettings.retrySettings) { - implicit val requestSettings = settings.requestSettings + implicit val requestSettings: RequestSettings = settings.requestSettings addAuth(request).flatMap(singleRequest(_))(ExecutionContexts.parasitic) } @@ -110,7 +110,7 @@ private[connectors] final class GoogleHttp private (val http: HttpExt) extends A parallelism: Int = 1): FlowWithContext[HttpRequest, Ctx, Try[T], Ctx, Future[HostConnectionPool]] = FlowWithContext.fromTuples { Flow.fromMaterializer { (mat, attr) => - implicit val settings = GoogleAttributes.resolveSettings(mat, attr) + implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val p = if (port == -1) if (https) 443 else 80 else port val uriFlow = FlowWithContext[HttpRequest, Ctx].map(addStandardQuery) @@ -163,7 +163,7 @@ private[connectors] final class GoogleHttp private (val http: HttpExt) extends A .fold(settings.requestSettings.queryString)(_.concat(settings.requestSettings.`&queryString`))))) private def addAuth(request: HttpRequest)(implicit settings: GoogleSettings): Future[HttpRequest] = { - implicit val requestSettings = settings.requestSettings + implicit val requestSettings: RequestSettings = settings.requestSettings settings.credentials .get() .map { token => diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala index d58c54de0..5d64b5b2a 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala @@ -43,7 +43,7 @@ class PaginatedRequestSpec super.afterAll() } - implicit val patience = PatienceConfig(remainingOrDefault) + implicit val patience: PatienceConfig = PatienceConfig(remainingOrDefault) implicit val paginated: Paginated[JsValue] = _.asJsObject.fields.get("pageToken").flatMap { case JsString(value) => Some(value) case _ => None diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala index 15ded38bf..819834bb8 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala +++ 
b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala @@ -40,7 +40,7 @@ class ResumableUploadSpec with ScalaFutures with HoverflySupport { - implicit val patience = PatienceConfig(remainingOrDefault) + implicit val patience: PatienceConfig = PatienceConfig(remainingOrDefault) override def afterAll(): Unit = { TestKit.shutdownActorSystem(system) diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala index 08c067da5..1bd56b790 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala @@ -15,7 +15,7 @@ package org.apache.pekko.stream.connectors.google.auth import org.apache.pekko import pekko.actor.ActorSystem -import pekko.stream.connectors.google.{ GoogleSettings, HoverflySupport } +import pekko.stream.connectors.google.{ GoogleSettings, HoverflySupport, RequestSettings } import pekko.testkit.TestKit import io.specto.hoverfly.junit.core.SimulationSource.dsl import io.specto.hoverfly.junit.core.model.RequestFieldMatcher.newRegexMatcher @@ -42,10 +42,10 @@ class GoogleOAuth2Spec TestKit.shutdownActorSystem(system) super.afterAll() } - implicit val defaultPatience = PatienceConfig(remainingOrDefault) + implicit val defaultPatience: PatienceConfig = PatienceConfig(remainingOrDefault) implicit val executionContext: ExecutionContext = system.dispatcher - implicit val settings = GoogleSettings(system) + implicit val settings: GoogleSettings = GoogleSettings(system) implicit val clock = Clock.systemUTC() lazy val privateKey = { @@ -73,7 +73,7 @@ class GoogleOAuth2Spec success("""{"access_token": "token", "token_type": "String", "expires_in": 3600}""", "application/json")))) - implicit val settings = GoogleSettings().requestSettings + implicit val settings: RequestSettings = GoogleSettings().requestSettings GoogleOAuth2.getAccessToken("email", privateKey, scopes).futureValue should matchPattern { case AccessToken("token", exp) if exp > (System.currentTimeMillis / 1000L + 3000L) => } diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala index 5816d906e..7d7e2342a 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala @@ -43,7 +43,8 @@ class OAuth2CredentialsSpec } import system.dispatcher - implicit val settings = GoogleSettings().requestSettings + + implicit val settings: RequestSettings = GoogleSettings().requestSettings implicit val clock = Clock.systemUTC() final object AccessTokenProvider { diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala index aa75e9cc4..56a75389c 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala @@ -76,7 +76,7 @@ class GoogleHttpSpec http } - implicit val settings = 
GoogleSettings().requestSettings + implicit val settings: RequestSettings = GoogleSettings().requestSettings "GoogleHttp" must { @@ -163,7 +163,7 @@ class GoogleHttpSpec when(credentials.get()(any[ExecutionContext], any[RequestSettings])).thenReturn( Future.failed(GoogleOAuth2Exception(ErrorInfo())), Future.failed(new AnotherException)) - implicit val settingsWithMockedCredentials = GoogleSettings().copy(credentials = credentials) + implicit val settingsWithMockedCredentials: GoogleSettings = GoogleSettings().copy(credentials = credentials) val http = mockHttp when( diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala index 9349cc71b..61070294c 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala @@ -37,7 +37,7 @@ private[fcm] object FcmFlows { private[fcm] def fcmWithData[T](conf: FcmSettings): Flow[(FcmNotification, T), (FcmResponse, T), NotUsed] = Flow .fromMaterializer { (mat, attr) => - implicit val settings = resolveSettings(conf)(mat, attr) + implicit val settings: GoogleSettings = resolveSettings(conf)(mat, attr) val sender = new FcmSender() Flow[(FcmNotification, T)].mapAsync(conf.maxConcurrentConnections) { case (notification, data) => @@ -54,7 +54,7 @@ private[fcm] object FcmFlows { private[fcm] def fcm(conf: FcmSettings): Flow[FcmNotification, FcmResponse, NotUsed] = Flow .fromMaterializer { (mat, attr) => - implicit val settings = resolveSettings(conf)(mat, attr) + implicit val settings: GoogleSettings = resolveSettings(conf)(mat, attr) val sender = new FcmSender() Flow[FcmNotification].mapAsync(conf.maxConcurrentConnections) { notification => sender.send(Http(mat.system), FcmSend(conf.isTest, notification))(mat, settings) diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala index 1fd2d9753..4e1768204 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala @@ -86,11 +86,13 @@ private[fcm] object FcmJsonSupport extends DefaultJsonProtocol with SprayJsonSup } // app -> google - implicit val webPushNotificationJsonFormat: RootJsonFormat[WebPushNotification] = jsonFormat3(WebPushNotification) + implicit val webPushNotificationJsonFormat: RootJsonFormat[WebPushNotification] = + jsonFormat3(WebPushNotification.apply) implicit val webPushConfigJsonFormat: RootJsonFormat[WebPushConfig] = jsonFormat3(WebPushConfig.apply) - implicit val androidNotificationJsonFormat: RootJsonFormat[AndroidNotification] = jsonFormat11(AndroidNotification) + implicit val androidNotificationJsonFormat: RootJsonFormat[AndroidNotification] = + jsonFormat11(AndroidNotification.apply) implicit val androidConfigJsonFormat: RootJsonFormat[AndroidConfig] = jsonFormat6(AndroidConfig.apply) - implicit val basicNotificationJsonFormat: RootJsonFormat[BasicNotification] = jsonFormat2(BasicNotification) + implicit val basicNotificationJsonFormat: RootJsonFormat[BasicNotification] = jsonFormat2(BasicNotification.apply) implicit val 
sendableFcmNotificationJsonFormat: RootJsonFormat[FcmNotification] = jsonFormat8(FcmNotification.apply) - implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend) + implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend.apply) } diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala index 1046cdba7..fdc9008b4 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala @@ -249,10 +249,10 @@ private[fcm] object FcmJsonSupport extends DefaultJsonProtocol with SprayJsonSup implicit val fcmOptionsJsonFormat: RootJsonFormat[FcmOptions] = jsonFormat1(FcmOptions.apply) implicit val apnsFcmOptionsJsonFormat: RootJsonFormat[ApnsFcmOptions] = jsonFormat2(ApnsFcmOptions.apply) implicit val webPushFcmOptionsJsonFormat: RootJsonFormat[WebPushFcmOptions] = jsonFormat2(WebPushFcmOptions.apply) - implicit val androidColorJsonFormat: RootJsonFormat[Color] = jsonFormat4(Color) + implicit val androidColorJsonFormat: RootJsonFormat[Color] = jsonFormat4(Color.apply) implicit val androidLightSettingsJsonFormat: RootJsonFormat[LightSettings] = jsonFormat3(LightSettings.apply) implicit val androidConfigJsonFormat: RootJsonFormat[AndroidConfig] = jsonFormat8(AndroidConfig.apply) implicit val basicNotificationJsonFormat: RootJsonFormat[BasicNotification] = jsonFormat3(BasicNotification.apply) implicit val mainFcmNotificationJsonFormat: RootJsonFormat[FcmNotification] = jsonFormat9(FcmNotification.apply) - implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend) + implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend.apply) } diff --git a/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala b/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala index 66a605dcf..8b8e46a51 100644 --- a/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala +++ b/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala @@ -13,9 +13,9 @@ package docs.scaladsl -import org.apache.pekko.actor.ActorSystem //#imports import org.apache.pekko +import pekko.actor.ActorSystem import pekko.stream.connectors.google.firebase.fcm.FcmSettings import pekko.stream.connectors.google.firebase.fcm.v1.models._ import pekko.stream.connectors.google.firebase.fcm.v1.scaladsl.GoogleFcm @@ -28,7 +28,7 @@ import scala.concurrent.Future class FcmExamples { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #simple-send val fcmConfig = FcmSettings() diff --git a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala index 16dff540f..a2d4d4922 100644 --- a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala +++ b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala @@ -52,13 +52,13 @@ class FcmSenderSpec override def afterAll(): Unit = TestKit.shutdownActorSystem(system) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 2.seconds, interval = 
50.millis) implicit val executionContext: ExecutionContext = system.dispatcher implicit val conf = FcmSettings() - implicit val settings = GoogleSettings().copy(projectId = "projectId") + implicit val settings: GoogleSettings = GoogleSettings().copy(projectId = "projectId") "FcmSender" should { diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala index 43dc29854..b18579e45 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala @@ -36,7 +36,7 @@ private[hbase] class HBaseFlowStage[A](settings: HTableSettings[A]) extends Grap override protected def logSource = classOf[HBaseFlowStage[A]] - implicit val connection = connect(settings.conf) + implicit val connection: Connection = connect(settings.conf) lazy val table: Table = getOrCreateTable(settings.tableName, settings.columnFamilies).get diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala index e9935121a..7c8a30bcc 100644 --- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala +++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala @@ -17,7 +17,7 @@ import org.apache.pekko import pekko.stream.{ Attributes, Outlet, SourceShape } import pekko.stream.connectors.hbase.HTableSettings import pekko.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging } -import org.apache.hadoop.hbase.client.{ Result, Scan, Table } +import org.apache.hadoop.hbase.client.{ Connection, Result, Scan, Table } import scala.util.control.NonFatal @@ -40,7 +40,7 @@ private[hbase] final class HBaseSourceLogic[A](scan: Scan, with StageLogging with HBaseCapabilities { - implicit val connection = connect(settings.conf) + implicit val connection: Connection = connect(settings.conf) lazy val table: Table = getOrCreateTable(settings.tableName, settings.columnFamilies).get private var results: java.util.Iterator[Result] = null diff --git a/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala b/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala index 06c4ed2a0..a6a867107 100644 --- a/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala +++ b/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala @@ -41,7 +41,7 @@ class HBaseStageSpec with BeforeAndAfterAll with LogCapturing { - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 5.seconds, interval = 500.millis) // #create-converter-put diff --git a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala index 3eca2dbd6..3587f53e4 100644 --- a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala +++ b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala @@ -253,7 +253,7 @@ class HdfsWriterSpec committedOffsets = committedOffsets :+ offset val resF = Source(messagesFromKafka) - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book // Transform message so that we can write to hdfs HdfsWriteMessage(ByteString(book.title), kafkaMessage.offset) diff --git 
a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala index a511e46a5..e2c2273f8 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala @@ -227,5 +227,5 @@ private[pushkit] object PushKitJsonSupport extends DefaultJsonProtocol with Spra implicit val webNotificationJsonFormat: RootJsonFormat[WebNotification] = jsonFormat14(WebNotification.apply) implicit val pushKitNotificationJsonFormat: RootJsonFormat[PushKitNotification] = jsonFormat8( PushKitNotification.apply) - implicit val pushKitSendJsonFormat: RootJsonFormat[PushKitSend] = jsonFormat2(PushKitSend) + implicit val pushKitSendJsonFormat: RootJsonFormat[PushKitSend] = jsonFormat2(PushKitSend.apply) } diff --git a/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala b/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala index 6350b7bec..7a6d0f5e9 100644 --- a/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala +++ b/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala @@ -13,9 +13,9 @@ package docs.scaladsl -import org.apache.pekko.actor.ActorSystem //#imports import org.apache.pekko +import pekko.actor.ActorSystem import pekko.stream.connectors.huawei.pushkit._ import pekko.stream.connectors.huawei.pushkit.scaladsl.HmsPushKit import pekko.stream.connectors.huawei.pushkit.models.AndroidConfig @@ -38,7 +38,7 @@ import scala.concurrent.Future class PushKitExamples { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #simple-send val config = HmsSettings() diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala index 5ff7206bc..99f46463b 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala @@ -48,7 +48,7 @@ class HmsTokenApiSpec override def afterAll() = TestKit.shutdownActorSystem(system) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 2.seconds, interval = 50.millis) val config = HmsSettings() diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala index d047ca0f5..cc83e885f 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala @@ -50,7 +50,7 @@ class PushKitSenderSpec override def afterAll() = TestKit.shutdownActorSystem(system) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 6.seconds, interval = 50.millis) implicit val executionContext: ExecutionContext = system.dispatcher diff --git a/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala 
b/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala index 8178b2c69..246443dba 100644 --- a/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala @@ -43,7 +43,7 @@ class FlowSpec with ScalaFutures with LogCapturing { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() final val DatabaseName = this.getClass.getSimpleName @@ -102,7 +102,7 @@ class FlowSpec committedOffsets = committedOffsets :+ offset val f1 = Source(messagesFromKafka) - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val cpu = kafkaMessage.cpu println("hostname: " + cpu.getHostname) @@ -111,7 +111,7 @@ class FlowSpec .groupedWithin(10, 50.millis) .via( InfluxDbFlow.typedWithPassThrough(classOf[InfluxDbFlowCpu])) - .map { messages: Seq[InfluxDbWriteResult[InfluxDbFlowCpu, KafkaOffset]] => + .map { (messages: Seq[InfluxDbWriteResult[InfluxDbFlowCpu, KafkaOffset]]) => messages.foreach { message => commitToKafka(message.writeMessage.passThrough) } diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala index 3072b825e..35c86547d 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala @@ -39,7 +39,7 @@ class InfluxDbSourceSpec final val DatabaseName = "InfluxDbSourceSpec" - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() implicit var influxDB: InfluxDB = _ diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala index 61d35d8f6..4de4d3817 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala @@ -41,7 +41,7 @@ class InfluxDbSpec with ScalaFutures with LogCapturing { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() final val DatabaseName = this.getClass.getSimpleName @@ -81,7 +81,7 @@ class InfluxDbSpec // #run-typed val f1 = InfluxDbSource .typed(classOf[InfluxDbSpecCpu], InfluxDbReadSettings(), influxDB, query) - .map { cpu: InfluxDbSpecCpu => + .map { (cpu: InfluxDbSpecCpu) => { val clonedCpu = cpu.cloneAt(cpu.getTime.plusSeconds(60000)) List(InfluxDbWriteMessage(clonedCpu)) diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala index 85891ae3a..2209d120d 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala @@ -125,12 +125,12 @@ private[ironmq] final class IronMqPullStage(queueName: String, settings: IronMqS buffer = buffer.tail } - private val updateBuffer = getAsyncCallback { xs: List[ReservedMessage] => + private val updateBuffer = getAsyncCallback { (xs: List[ReservedMessage]) => buffer = buffer ::: xs deliveryMessages() } - private val updateFetching = getAsyncCallback { x: Boolean => + private val updateFetching = getAsyncCallback { (x: Boolean) => fetching = x } } diff --git a/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala b/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala index 763e7d57b..f0a930975 100644 --- 
a/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala +++ b/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala @@ -40,7 +40,7 @@ abstract class IronMqSpec override implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 15.seconds, interval = 1.second) val DefaultActorSystemTerminateTimeout: Duration = 10.seconds - private implicit val ec = ExecutionContext.global + private implicit val ec: ExecutionContext = ExecutionContext.global private var mutableIronMqClient = Option.empty[IronMqClient] private var mutableConfig = Option.empty[Config] diff --git a/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala index 01eb1cc6b..5c33cf710 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala @@ -35,7 +35,7 @@ import scala.util.{ Failure, Success } class JmsBufferedAckConnectorsSpec extends JmsSharedServerSpec { - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Ack Connectors" should { "publish and consume strings through a queue" in withConnectionFactory() { connectionFactory => diff --git a/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala index 5184eb5dc..7da5909b3 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala @@ -26,7 +26,7 @@ import scala.concurrent.duration._ import scala.concurrent.Future class JmsIbmmqConnectorsSpec extends JmsSpec { - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Ibmmq Connectors" should { val queueConnectionFactory = { diff --git a/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala index 5b8d74933..4c61c2481 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala @@ -35,7 +35,7 @@ class JmsTxConnectorsSpec extends JmsSharedServerSpec { private final val log = LoggerFactory.getLogger(classOf[JmsTxConnectorsSpec]) - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Transactional Connectors" should { "publish and consume strings through a queue" in withConnectionFactory() { connectionFactory => diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala index f5511597a..24c698119 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala @@ -37,7 +37,7 @@ abstract class JmsSpec with MockitoSugar with LogCapturing { - implicit val system = ActorSystem(this.getClass.getSimpleName) + implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName) val consumerConfig = system.settings.config.getConfig(JmsConsumerSettings.configPath) val producerConfig = system.settings.config.getConfig(JmsProducerSettings.configPath) diff --git 
a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala index 2f2c5b4a7..10339609b 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala @@ -33,7 +33,7 @@ import scala.util.{ Failure, Success } class JmsAckConnectorsSpec extends JmsSpec { - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Ack Connectors" should { "publish and consume strings through a queue" in withConnectionFactory() { connectionFactory => diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala index e8b641806..1716bfd87 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala @@ -40,7 +40,7 @@ object ShardIterator { override final val shardIteratorType: ShardIteratorType = ShardIteratorType.TRIM_HORIZON } - case class AtTimestamp private (value: Instant) extends ShardIterator { + case class AtTimestamp private[kinesis] (value: Instant) extends ShardIterator { override final val timestamp: Option[Instant] = Some(value) override final val startingSequenceNumber: Option[String] = None diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala index b23a28f50..bca357c36 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala @@ -269,7 +269,7 @@ class KinesisSchedulerSourceSpec var recordProcessor: ShardRecordProcessor = _ var otherRecordProcessor: ShardRecordProcessor = _ - private val schedulerBuilder = { x: ShardRecordProcessorFactory => + private val schedulerBuilder = { (x: ShardRecordProcessorFactory) => recordProcessor = x.shardRecordProcessor() otherRecordProcessor = x.shardRecordProcessor() semaphore.release() @@ -334,12 +334,12 @@ class KinesisSchedulerSourceSpec var latestRecord: KinesisClientRecord = _ val allRecordsPushed: Future[Unit] = Future { for (i <- 1 to 3) { - val record = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - when(record.sequenceNumber).thenReturn("1") - when(record.subSequenceNumber).thenReturn(i.toLong) + val clientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn("1") + when(clientRecord.subSequenceNumber).thenReturn(i.toLong) sourceProbe.sendNext( new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-1", @@ -349,7 +349,7 @@ class KinesisSchedulerSourceSpec override def forceCheckpoint(): Unit = checkpointer(record) }) - latestRecord = record + latestRecord = clientRecord } } @@ -374,11 +374,11 @@ class KinesisSchedulerSourceSpec val allRecordsPushed: Future[Unit] = Future { for (i <- 1 to 3) { - val record = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - 
when(record.sequenceNumber).thenReturn(i.toString) + val clientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn(i.toString) sourceProbe.sendNext( new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-1", @@ -388,14 +388,14 @@ class KinesisSchedulerSourceSpec override def forceCheckpoint(): Unit = checkpointerShard1(record) }) - latestRecordShard1 = record + latestRecordShard1 = clientRecord } for (i <- 1 to 3) { - val record = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - when(record.sequenceNumber).thenReturn(i.toString) + val clientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn(i.toString) sourceProbe.sendNext( new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-2", @@ -405,7 +405,7 @@ class KinesisSchedulerSourceSpec override def forceCheckpoint(): Unit = checkpointerShard2(record) }) - latestRecordShard2 = record + latestRecordShard2 = clientRecord } } @@ -422,12 +422,12 @@ class KinesisSchedulerSourceSpec } "fail with Exception if checkpoint action fails" in new KinesisSchedulerCheckpointContext { - val record: KinesisClientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - when(record.sequenceNumber).thenReturn("1") + val clientRecord: KinesisClientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn("1") val checkpointer: KinesisClientRecord => Unit = org.mockito.Mockito.mock(classOf[KinesisClientRecord => Unit]) val committableRecord: CommittableRecord = new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-1", @@ -439,7 +439,7 @@ class KinesisSchedulerSourceSpec sourceProbe.sendNext(committableRecord) val failure = new RuntimeException() - when(checkpointer.apply(record)).thenThrow(failure) + when(checkpointer.apply(clientRecord)).thenThrow(failure) sinkProbe.request(1) diff --git a/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala b/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala index 09af15528..7804296e9 100644 --- a/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala +++ b/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala @@ -52,11 +52,11 @@ class MongoSinkSpec fromRegistries(fromProviders(classOf[Number], classOf[DomainObject]), DEFAULT_CODEC_REGISTRY): @nowarn( "msg=match may not be exhaustive") - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() + implicit val defaultPatience: PatienceConfig = + PatienceConfig(timeout = 5.seconds, interval = 50.millis) override protected def beforeAll(): Unit = { - implicit val patienceConfig = - PatienceConfig(timeout = 2.seconds, interval = 100.millis) Source.fromPublisher(db.drop()).runWith(Sink.headOption).futureValue } @@ -69,9 +69,6 @@ class MongoSinkSpec db.getCollection("domainObjectsSink", classOf[DomainObject]).withCodecRegistry(codecRegistry) private val domainObjectsDocumentColl = db.getCollection("domainObjectsSink") - implicit val defaultPatience = - PatienceConfig(timeout = 5.seconds, interval = 50.millis) - override def afterEach(): Unit = { Source.fromPublisher(numbersDocumentColl.deleteMany(new Document())).runWith(Sink.head).futureValue Source.fromPublisher(domainObjectsDocumentColl.deleteMany(new Document())).runWith(Sink.head).futureValue diff --git 
a/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala b/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala index 1e390cbb1..8037a3dff 100644 --- a/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala +++ b/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala @@ -43,7 +43,7 @@ class MongoSourceSpec with LogCapturing { // #init-system - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #init-system override protected def beforeAll(): Unit = @@ -74,7 +74,7 @@ class MongoSourceSpec private val numbersDocumentColl = db.getCollection("numbers") - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 5.seconds, interval = 50.millis) override def afterEach(): Unit = diff --git a/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala b/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala index efce44f7a..f4dff41f9 100644 --- a/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala +++ b/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala @@ -159,7 +159,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // Copy source to sink1 through ODocument stream val f1 = OrientDbSource( sourceClass, - OrientDbSourceSettings(oDatabase)).map { message: OrientDbReadResult[ODocument] => + OrientDbSourceSettings(oDatabase)).map { (message: OrientDbReadResult[ODocument]) => OrientDbWriteMessage(message.oDocument) } .groupedWithin(10, 50.millis) @@ -173,7 +173,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // #run-odocument val result: Future[immutable.Seq[String]] = OrientDbSource( sink4, - OrientDbSourceSettings(oDatabase)).map { message: OrientDbReadResult[ODocument] => + OrientDbSourceSettings(oDatabase)).map { (message: OrientDbReadResult[ODocument]) => message.oDocument.field[String]("book_title") } .runWith(Sink.seq) @@ -197,7 +197,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with val f1 = OrientDbSource( sourceClass, - OrientDbSourceSettings(oDatabase)).map { message: OrientDbReadResult[ODocument] => + OrientDbSourceSettings(oDatabase)).map { (message: OrientDbReadResult[ODocument]) => OrientDbWriteMessage(message.oDocument) } .groupedWithin(10, 50.millis) @@ -231,7 +231,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // #run-typed val streamCompletion: Future[Done] = OrientDbSource .typed(sourceClass, OrientDbSourceSettings(oDatabase), classOf[OrientDbTest.source1]) - .map { m: OrientDbReadResult[OrientDbTest.source1] => + .map { (m: OrientDbReadResult[OrientDbTest.source1]) => val db: ODatabaseDocumentTx = oDatabase.acquire db.setDatabaseOwner(new OObjectDatabaseTx(db)) ODatabaseRecordThreadLocal.instance.set(db) @@ -267,7 +267,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with committedOffsets = committedOffsets :+ offset val f1 = Source(messagesFromKafka) - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title println("title: " + book.title) @@ -279,7 +279,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with OrientDbFlow.createWithPassThrough( sink7, OrientDbWriteSettings(oDatabase))) - .map { messages: Seq[OrientDbWriteMessage[ODocument, KafkaOffset]] => + .map { (messages: Seq[OrientDbWriteMessage[ODocument, KafkaOffset]]) => messages.foreach { message => commitToKafka(message.passThrough) } 
diff --git a/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala b/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala index 7bf6095c4..6be579e03 100644 --- a/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala +++ b/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala @@ -18,6 +18,8 @@ import pekko.actor.ActorSystem import pekko.stream.connectors.pravega.{ PravegaEvent, ReaderSettingsBuilder, + TableReaderSettingsBuilder, + TableWriterSettings, TableWriterSettingsBuilder, WriterSettingsBuilder } @@ -27,21 +29,21 @@ import io.pravega.client.stream.Serializer import io.pravega.client.stream.impl.UTF8StringSerializer import java.nio.ByteBuffer -import pekko.stream.connectors.pravega.TableReaderSettingsBuilder import pekko.stream.connectors.pravega.scaladsl.PravegaTable import pekko.stream.connectors.pravega.scaladsl.Pravega + import scala.util.Using import io.pravega.client.tables.TableKey class PravegaReadWriteDocs { - implicit val system = ActorSystem("PravegaDocs") + implicit val system: ActorSystem = ActorSystem("PravegaDocs") val serializer = new UTF8StringSerializer implicit def personSerialiser: Serializer[Person] = ??? - implicit val intSerializer = new Serializer[Int] { + implicit val intSerializer: Serializer[Int] = new Serializer[Int] { override def serialize(value: Int): ByteBuffer = { val buff = ByteBuffer.allocate(4).putInt(value) buff.position(0) @@ -89,7 +91,7 @@ class PravegaReadWriteDocs { Pravega .source(readerGroup, readerSettings) - .to(Sink.foreach { event: PravegaEvent[String] => + .to(Sink.foreach { (event: PravegaEvent[String]) => val message: String = event.message processMessage(message) }) @@ -99,7 +101,7 @@ class PravegaReadWriteDocs { } - implicit val tablewriterSettings = TableWriterSettingsBuilder[Int, Person]() + implicit val tablewriterSettings: TableWriterSettings[Int, Person] = TableWriterSettingsBuilder[Int, Person]() .withKeyExtractor(id => new TableKey(intSerializer.serialize(id))) .build() diff --git a/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala b/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala index fa0362dbc..b109878c9 100644 --- a/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala +++ b/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala @@ -31,9 +31,9 @@ import io.pravega.client.tables.TableKey class PravegaSettingsSpec extends PravegaBaseSpec with Matchers { - implicit val serializer = new UTF8StringSerializer + implicit val serializer: UTF8StringSerializer = new UTF8StringSerializer - implicit val intSerializer = new Serializer[Int] { + implicit val intSerializer: Serializer[Int] = new Serializer[Int] { override def serialize(value: Int): ByteBuffer = { val buff = ByteBuffer.allocate(4).putInt(value) buff.position(0) diff --git a/pravega/src/test/scala/docs/scaladsl/Serializers.scala b/pravega/src/test/scala/docs/scaladsl/Serializers.scala index 20edef785..6eb33c596 100644 --- a/pravega/src/test/scala/docs/scaladsl/Serializers.scala +++ b/pravega/src/test/scala/docs/scaladsl/Serializers.scala @@ -19,9 +19,9 @@ import io.pravega.client.stream.impl.UTF8StringSerializer object Serializers { - implicit val stringSerializer = new UTF8StringSerializer() + implicit val stringSerializer: UTF8StringSerializer = new UTF8StringSerializer() - implicit val personSerializer = new Serializer[Person] { + implicit val personSerializer: Serializer[Person] = new Serializer[Person] { def serialize(x: Person): ByteBuffer = { val name = x.firstname.getBytes("UTF-8") 
val buff = ByteBuffer.allocate(4 + name.length).putInt(x.id) @@ -38,7 +38,7 @@ object Serializers { } - implicit val intSerializer = new Serializer[Int] { + implicit val intSerializer: Serializer[Int] = new Serializer[Int] { override def serialize(value: Int): ByteBuffer = { val buff = ByteBuffer.allocate(4).putInt(value) buff.position(0) diff --git a/project/Common.scala b/project/Common.scala index 8dd890f1a..2f020dff0 100644 --- a/project/Common.scala +++ b/project/Common.scala @@ -30,6 +30,8 @@ object Common extends AutoPlugin { override def requires = JvmPlugin && HeaderPlugin && ApacheSonatypePlugin && DynVerPlugin + val isScala3 = Def.setting(scalaBinaryVersion.value == "3") + override def globalSettings = Seq( scmInfo := Some(ScmInfo(url("https://github.com/apache/incubator-pekko-connectors"), "git@github.com:apache/incubator-pekko-connectors.git")), @@ -43,6 +45,12 @@ object Common extends AutoPlugin { // Ignore unused keys which affect documentation excludeLintKeys ++= Set(scmInfo, projectInfoVersion, autoAPIMappings)) + val packagesToSkip = "org.apache.pekko.pattern:" + // for some reason Scaladoc creates this + "org.mongodb.scala:" + // this one is a mystery as well + // excluding generated grpc classes, except the model ones (com.google.pubsub) + "com.google.api:com.google.cloud:com.google.iam:com.google.logging:" + + "com.google.longrunning:com.google.protobuf:com.google.rpc:com.google.type" + override lazy val projectSettings = Dependencies.Common ++ Seq( projectInfoVersion := (if (isSnapshot.value) "snapshot" else version.value), crossVersion := CrossVersion.binary, @@ -64,13 +72,8 @@ object Common extends AutoPlugin { "-doc-version", version.value, "-sourcepath", - (ThisBuild / baseDirectory).value.toString, - "-skip-packages", - "org.apache.pekko.pattern:" + // for some reason Scaladoc creates this - "org.mongodb.scala:" + // this one is a mystery as well - // excluding generated grpc classes, except the model ones (com.google.pubsub) - "com.google.api:com.google.cloud:com.google.iam:com.google.logging:" + - "com.google.longrunning:com.google.protobuf:com.google.rpc:com.google.type"), + (ThisBuild / baseDirectory).value.toString), + Compile / doc / scalacOptions := scalacOptions.value, Compile / doc / scalacOptions ++= Seq( "-doc-source-url", { @@ -79,6 +82,13 @@ object Common extends AutoPlugin { }, "-doc-canonical-base-url", "https://pekko.apache.org/api/pekko-connectors/current/"), + Compile / doc / scalacOptions ++= { + if (isScala3.value) { + Seq("-skip-packages:" + packagesToSkip) + } else { + Seq("-skip-packages", packagesToSkip) + } + }, Compile / doc / scalacOptions -= "-Werror", compile / javacOptions ++= Seq( "-Xlint:cast", diff --git a/project/Dependencies.scala b/project/Dependencies.scala index d5630b7b9..458601ffa 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -16,7 +16,8 @@ object Dependencies { val Scala213 = "2.13.10" // update even in link-validator.conf val Scala212 = "2.12.17" - val ScalaVersions = Seq(Scala213, Scala212) + val Scala3 = "3.3.0" + val ScalaVersions = Seq(Scala213, Scala212, Scala3) val PekkoVersion = "1.0.1" val PekkoBinaryVersion = "current" @@ -62,7 +63,7 @@ object Dependencies { val testkit = Seq( libraryDependencies := Seq( - "org.scala-lang.modules" %% "scala-collection-compat" % "2.2.0", + "org.scala-lang.modules" %% "scala-collection-compat" % "2.10.0", "org.apache.pekko" %% "pekko-stream" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion, "org.apache.pekko" %% "pekko-slf4j" 
% PekkoVersion, @@ -87,6 +88,7 @@ object Dependencies { "com.fasterxml.jackson.core" % "jackson-databind" % JacksonDatabindVersion) val Amqp = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.rabbitmq" % "amqp-client" % "5.14.2") ++ Mockito) @@ -100,6 +102,7 @@ object Dependencies { ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) val AzureStorageQueue = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.microsoft.azure" % "azure-storage" % "8.0.0")) @@ -108,6 +111,7 @@ object Dependencies { val CassandraDriverVersionInDocs = "4.15" val Cassandra = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("com.datastax.oss" % "java-driver-core" % CassandraDriverVersion) .exclude("com.github.spotbugs", "spotbugs-annotations") @@ -116,6 +120,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided)) val Couchbase = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.couchbase.client" % "java-client" % CouchbaseVersion, "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", @@ -141,6 +146,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion)) val Elasticsearch = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -151,16 +157,19 @@ object Dependencies { "com.google.jimfs" % "jimfs" % "1.2" % Test)) val AvroParquet = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.parquet" % "parquet-avro" % "1.10.1", ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), "com.sksamuel.avro4s" %% "avro4s-core" % "4.1.1" % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, - "org.specs2" %% "specs2-core" % "4.8.3" % Test, - "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) + "org.specs2" %% "specs2-core" % "4.20.0" % Test, // MIT like: https://github.com/etorreborre/specs2/blob/master/LICENSE.txt + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html + )) val Ftp = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "commons-net" % "commons-net" % "3.8.0", "com.hierynomus" % "sshj" % "0.33.0")) @@ -169,6 +178,7 @@ object Dependencies { val GeodeVersionForDocs = "115" val Geode = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq("geode-core", "geode-cq") .map("org.apache.geode" % _ % GeodeVersion) ++ @@ -177,6 +187,7 @@ object Dependencies { "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies) val GoogleCommon = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -185,6 +196,7 @@ object Dependencies { "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val GoogleBigQuery = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-jackson" % PekkoHttpVersion % Provided, @@ -194,6 +206,7 @@ object Dependencies { "com.fasterxml.jackson.datatype" % "jackson-datatype-jsr310" % JacksonDatabindVersion % Test, "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val 
GoogleBigQueryStorage = Seq( + crossScalaVersions -= Scala3, // see Pekko gRPC version in plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-bigquerystorage/tree/master/proto-google-cloud-bigquerystorage-v1 @@ -209,12 +222,14 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion) ++ Mockito) val GooglePubSub = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, "com.github.tomakehurst" % "wiremock" % "2.27.2" % Test) ++ Mockito) val GooglePubSubGrpc = Seq( + crossScalaVersions -= Scala3, // see Pekko gRPC version in plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-pubsub/tree/master/proto-google-cloud-pubsub-v1/ @@ -225,11 +240,13 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) val GoogleFcm = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion) ++ Mockito) val GoogleStorage = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -253,6 +270,7 @@ object Dependencies { val HadoopVersion = "3.2.1" val Hdfs = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("org.apache.hadoop" % "hadoop-client" % HadoopVersion).exclude("log4j", "log4j").exclude("org.slf4j", "slf4j-log4j12"), @@ -266,6 +284,7 @@ object Dependencies { "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val HuaweiPushKit = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -276,12 +295,14 @@ object Dependencies { "org.influxdb" % "influxdb-java" % InfluxDBJavaVersion)) val IronMq = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.mdedetrich" %% "pekko-stream-circe" % "1.0.0", "org.mdedetrich" %% "pekko-http-circe" % "1.0.0")) val Jms = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "javax.jms" % "jms" % "1.1" % Provided, "com.ibm.mq" % "com.ibm.mq.allclient" % "9.2.5.0" % Test, @@ -293,6 +314,7 @@ object Dependencies { "https://repository.jboss.org/nexus/content/groups/public")) +: externalResolvers.value) val JsonStreaming = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.github.jsurfer" % "jsurfer-jackson" % "1.6.0") ++ JacksonDatabindDependencies) @@ -315,6 +337,7 @@ object Dependencies { "org.apache.kudu" % "kudu-client" % KuduVersion % Test)) val MongoDb = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.mongodb.scala" %% "mongo-scala-driver" % "4.4.0")) @@ -323,6 +346,7 @@ object Dependencies { "org.eclipse.paho" % "org.eclipse.paho.client.mqttv3" % "1.2.5")) val MqttStreaming = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-actor-typed" % PekkoVersion, "org.apache.pekko" %% "pekko-actor-testkit-typed" % PekkoVersion % Test, @@ -351,6 +375,7 @@ object Dependencies { )) val S3 = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-xml" % PekkoHttpVersion, @@ 
-377,6 +402,7 @@ object Dependencies { val SlickVersion = "3.3.3" val Slick = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.typesafe.slick" %% "slick" % SlickVersion, "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion, @@ -426,6 +452,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-http-testkit" % PekkoHttpVersion % Test)) val UnixDomainSocket = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.github.jnr" % "jffi" % "1.3.1", // classifier "complete", // Is the classifier needed anymore? "com.github.jnr" % "jnr-unixsocket" % "0.38.5")) diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala index e21449b4a..461fbe92f 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala @@ -116,7 +116,7 @@ object ResourceSettings { * there is only one instance of the resource instantiated per Actor System. */ final class ResourceExt private (sys: ExtendedActorSystem) extends Extension { - implicit val resource = Resource(ResourceSettings()(sys)) + implicit val resource: Resource = Resource(ResourceSettings()(sys)) sys.registerOnTermination(resource.cleanup()) } diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala index db019dcd2..f5d851291 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala @@ -14,7 +14,6 @@ package org.apache.pekko.stream.connectors.s3.impl import java.util.UUID - import org.apache.pekko import pekko.actor.ActorSystem import pekko.http.scaladsl.Http @@ -33,6 +32,8 @@ import software.amazon.awssdk.auth.credentials._ import software.amazon.awssdk.regions.Region import software.amazon.awssdk.regions.providers._ +import scala.concurrent.ExecutionContext + class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with IntegrationPatience with LogCapturing { // test fixtures @@ -55,7 +56,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with val multipartUpload = MultipartUpload("test-bucket", "testKey", "uploadId") it should "initiate multipart upload when the region is us-east-1" in { - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val req = HttpRequests.initiateMultipartUploadRequest( @@ -74,7 +75,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with other regions" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val req = HttpRequests.initiateMultipartUploadRequest( @@ -93,21 +94,22 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "throw an error if path-style access is false and the bucket name contains non-LDH characters" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1) + implicit val settings: S3Settings = getSettings(s3Region = Region.EU_WEST_1) assertThrows[IllegalUriException]( HttpRequests.getDownloadRequest(S3Location("invalid_bucket_name", "image-1024@2x"))) } it should 
"throw an error if the key uses `..`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1) + implicit val settings: S3Settings = getSettings(s3Region = Region.EU_WEST_1) assertThrows[IllegalUriException]( HttpRequests.getDownloadRequest(S3Location("validbucket", "../other-bucket/image-1024@2x"))) } it should "throw an error when using `..` with path-style access" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) assertThrows[IllegalUriException]( HttpRequests.getDownloadRequest(S3Location("invalid/../bucket_name", "image-1024@2x"))) @@ -120,7 +122,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with path-style access in region us-east-1" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.initiateMultipartUploadRequest( @@ -133,7 +136,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with path-style access in region us-east-1" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.getDownloadRequest(location) @@ -143,7 +147,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with path-style access in other regions" in { - implicit val settings = getSettings(s3Region = Region.US_WEST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_WEST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.initiateMultipartUploadRequest( @@ -156,7 +161,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with path-style access in other regions" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.getDownloadRequest(location) @@ -166,7 +172,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") val req = HttpRequests.getDownloadRequest(location) @@ -180,7 +187,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with // into an object at path /[empty string]/... 
// added this test because of a tricky uri building issue // in case of pathStyleAccess = false - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "/test/foo.txt") @@ -193,7 +200,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with it should "support download requests with keys ending with /" in { // object with a slash at the end of the filename should be accessible - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "/test//") @@ -205,7 +212,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with keys containing spaces" in { - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "test folder/test file.txt") @@ -217,7 +224,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with keys containing plus" in { - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "test folder/1 + 2 = 3") val req = HttpRequests.getDownloadRequest(location) @@ -227,7 +234,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with keys containing spaces with path-style access in other regions" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) val location = S3Location("bucket", "test folder/test file.txt") @@ -239,7 +247,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "add versionId query parameter when provided" in { - implicit val settings = getSettings().withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = getSettings().withAccessStyle(AccessStyle.PathAccessStyle) val location = S3Location("bucket", "test/foo.txt") val versionId = "123456" @@ -253,7 +261,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support multipart init upload requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") val req = HttpRequests.initiateMultipartUploadRequest( @@ -267,7 +276,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support multipart upload part requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") val req = HttpRequests.uploadPartRequest(multipartUpload, 1, MemoryChunk(ByteString.empty)) @@ -278,7 +288,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly multipart upload part request with customer keys server side encryption" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) 
+ implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) val myKey = "my-key" val md5Key = "md5-key" val s3Headers = ServerSideEncryption.customerKeys(myKey).withMd5(md5Key).headersFor(UploadPart) @@ -290,8 +301,9 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support multipart upload complete requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") - implicit val executionContext = scala.concurrent.ExecutionContext.global + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val executionContext: ExecutionContext = ExecutionContext.global val req = HttpRequests.completeMultipartUploadRequest(multipartUpload, (1, "part") :: Nil, Nil).futureValue @@ -302,7 +314,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with AES-256 server side encryption" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val s3Headers = ServerSideEncryption.aes256().headersFor(InitiateMultipartUpload) val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -310,7 +322,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with aws:kms server side encryption" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val testArn = "arn:aws:kms:my-region:my-account-id:key/my-key-id" val s3Headers = ServerSideEncryption.kms(testArn).headersFor(InitiateMultipartUpload) val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -320,7 +332,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with customer keys encryption" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val myKey = "my-key" val md5Key = "md5-key" val s3Headers = ServerSideEncryption.customerKeys(myKey).withMd5(md5Key).headersFor(InitiateMultipartUpload) @@ -332,7 +344,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with custom s3 storage class" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val s3Headers = S3Headers().withStorageClass(StorageClass.ReducedRedundancy).headers val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -340,7 +352,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with custom s3 headers" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val s3Headers = S3Headers().withCustomHeaders(Map("Cache-Control" -> "no-cache")).headers val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -348,7 +360,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with 
ScalaFutures with } it should "properly construct the list bucket request with no prefix, continuation token or delimiter passed" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.listBucket(location.bucket) @@ -357,7 +370,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request with a prefix and token passed" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.listBucket(location.bucket, Some("random/prefix"), Some("randomToken")) @@ -368,7 +382,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request with a delimiter and token passed" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.listBucket(location.bucket, delimiter = Some("/"), continuationToken = Some("randomToken")) @@ -377,7 +392,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request when using api version 1" in { - implicit val settings = + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2, listBucketApiVersion = ApiVersion.ListBucketVersion1) .withAccessStyle(AccessStyle.PathAccessStyle) @@ -388,7 +403,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request when using api version set to 1 and a continuation token" in { - implicit val settings = + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2, listBucketApiVersion = ApiVersion.ListBucketVersion1) .withAccessStyle(AccessStyle.PathAccessStyle) diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala index c665056e5..5c24dbc7f 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala @@ -62,7 +62,7 @@ class S3StreamSpec(_system: ActorSystem) } val location = S3Location("test-bucket", "test-key") - implicit val settings = + implicit val settings: S3Settings = S3Settings(MemoryBufferType, credentialsProvider, regionProvider, ApiVersion.ListBucketVersion2) val result: HttpRequest = S3Stream.invokePrivate(requestHeaders(getDownloadRequest(location), None)) @@ -86,7 +86,7 @@ class S3StreamSpec(_system: ActorSystem) val location = S3Location("test-bucket", "test-key") val range = ByteRange(1, 4) - implicit val settings = + implicit val settings: S3Settings = S3Settings(MemoryBufferType, credentialsProvider, regionProvider, ApiVersion.ListBucketVersion2) val result: HttpRequest = S3Stream.invokePrivate(requestHeaders(getDownloadRequest(location), Some(range))) diff --git 
a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala index 0ae1be6a6..75621b3bd 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala @@ -41,7 +41,7 @@ class SignerSpec(_system: ActorSystem) with LogCapturing { def this() = this(ActorSystem("SignerSpec")) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(5, Millis)) val credentials = StaticCredentialsProvider.create( diff --git a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala index b0112d3ae..fe20fd3fd 100644 --- a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala +++ b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala @@ -16,6 +16,8 @@ package docs.scaladsl import org.apache.pekko import pekko.Done import pekko.actor.ActorSystem + +import scala.concurrent.ExecutionContext //#important-imports import org.apache.pekko import pekko.stream.connectors.slick.scaladsl._ @@ -26,8 +28,8 @@ import slick.jdbc.GetResult import scala.concurrent.Future object SlickSourceWithPlainSQLQueryExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #source-example implicit val session = SlickSession.forConfig("slick-h2") @@ -62,8 +64,8 @@ object SlickSourceWithPlainSQLQueryExample extends App { } object SlickSourceWithTypedQueryExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #source-with-typed-query implicit val session = SlickSession.forConfig("slick-h2") @@ -94,8 +96,8 @@ object SlickSourceWithTypedQueryExample extends App { } object SlickSinkExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #sink-example implicit val session = SlickSession.forConfig("slick-h2") @@ -125,8 +127,8 @@ object SlickSinkExample extends App { } object SlickFlowExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #flow-example implicit val session = SlickSession.forConfig("slick-h2") @@ -168,8 +170,8 @@ object SlickFlowWithPassThroughExample extends App { def map[B](f: A => B): KafkaMessage[B] = KafkaMessage(f(msg), offset) } - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #flowWithPassThrough-example implicit val session = SlickSession.forConfig("slick-h2") diff --git a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala index 428f4d995..6563dba1f 100644 --- a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala +++ b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala @@ -27,7 +27,7 @@ import slick.dbio.DBIOAction import slick.jdbc.{ GetResult, JdbcProfile } import 
scala.concurrent.duration._ -import scala.concurrent.{ Await, Future } +import scala.concurrent.{ Await, ExecutionContext, Future } import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec @@ -43,7 +43,7 @@ class SlickSpec with Matchers with LogCapturing { // #init-mat - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #init-mat // #init-session @@ -59,9 +59,9 @@ class SlickSpec def * = (id, name) } - implicit val ec = system.dispatcher - implicit val defaultPatience = PatienceConfig(timeout = 3.seconds, interval = 50.millis) - implicit val getUserResult = GetResult(r => User(r.nextInt(), r.nextString())) + implicit val ec: ExecutionContext = system.dispatcher + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 3.seconds, interval = 50.millis) + implicit val getUserResult: GetResult[User] = GetResult(r => User(r.nextInt(), r.nextString())) val users = (1 to 40).map(i => User(i, s"Name$i")).toSet diff --git a/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala b/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala index efea2cc2a..9fc774f7d 100644 --- a/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala +++ b/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala @@ -34,7 +34,7 @@ class SnsPublisherSpec with IntegrationTestContext with LogCapturing { - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 15.seconds, interval = 100.millis) "SNS Publisher sink" should "send string message" in { diff --git a/solr/src/test/scala/docs/scaladsl/SolrSpec.scala b/solr/src/test/scala/docs/scaladsl/SolrSpec.scala index 3d8b80169..f38d9e1cb 100644 --- a/solr/src/test/scala/docs/scaladsl/SolrSpec.scala +++ b/solr/src/test/scala/docs/scaladsl/SolrSpec.scala @@ -90,7 +90,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #run-document val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) val doc: SolrInputDocument = bookToDoc(book) WriteMessage.createUpsertMessage(doc) @@ -138,7 +138,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #run-bean val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val title = tuple.getString("title") WriteMessage.createUpsertMessage(BookBean(title)) } @@ -179,7 +179,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #run-typed val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) WriteMessage.createUpsertMessage(book) } @@ -223,7 +223,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #typeds-flow val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) WriteMessage.createUpsertMessage(book) } @@ -294,7 +294,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #kafka-example // Note: This code mimics Pekko Connectors Kafka APIs val copyCollection = kafkaConsumerSource - .map { kafkaMessage: CommittableMessage => + .map { (kafkaMessage: CommittableMessage) => val book = kafkaMessage.book // Transform message so that we can write to solr 
WriteMessage.createUpsertMessage(book).withPassThrough(kafkaMessage.committableOffset) @@ -340,7 +340,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) val doc: SolrInputDocument = bookToDoc(book) WriteMessage.createUpsertMessage(doc) @@ -360,7 +360,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #delete-documents val deleteDocuments = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val id = tuple.fields.get("title").toString WriteMessage.createDeleteMessage[SolrInputDocument](id) } @@ -393,7 +393,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val upsertCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) .copy(comment = "Written by good authors.") val doc: SolrInputDocument = bookToDoc(book) @@ -414,7 +414,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #update-atomically-documents val updateCollection = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val id = tuple.fields.get("title").toString val comment = tuple.fields.get("comment").toString WriteMessage.createUpdateMessage[SolrInputDocument]( @@ -461,7 +461,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) WriteMessage.createUpsertMessage(book) } @@ -482,7 +482,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val deleteElements = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val title = tuple.fields.get("title").toString WriteMessage.createDeleteMessage[Book](title) } @@ -512,7 +512,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple).copy(comment = "Written by good authors.", routerOpt = Some("router-value")) WriteMessage.createUpsertMessage(book) @@ -534,7 +534,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val updateCollection = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => WriteMessage .createUpdateMessage[Book]( idField = "title", @@ -581,7 +581,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) val doc: SolrInputDocument = bookToDoc(book) WriteMessage.createUpsertMessage(doc) @@ -601,7 +601,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #delete-documents-query val deleteByQuery = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val title = tuple.fields.get("title").toString WriteMessage.createDeleteByQueryMessage[SolrInputDocument]( s"""title:"$title" """) @@ -657,7 +657,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #kafka-example-PT // Note: This code mimics 
Pekko Connectors Kafka APIs val copyCollection = kafkaConsumerSource - .map { offset: CommittableOffset => + .map { (offset: CommittableOffset) => // Transform message so that we can write to solr WriteMessage.createPassThrough(offset).withSource(new SolrInputDocument()) } diff --git a/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala index c830d77c6..2751a01e2 100644 --- a/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala +++ b/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala @@ -248,7 +248,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "fail if any of the messages in the batch request failed" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] when(mockAwsSqsClient.deleteMessageBatch(any[DeleteMessageBatchRequest])) .thenReturn(CompletableFuture.completedFuture { @@ -269,7 +269,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "fail if the batch request failed" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] when(mockAwsSqsClient.deleteMessageBatch(any[DeleteMessageBatchRequest])) .thenReturn( @@ -289,7 +289,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "fail if the client invocation failed" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] when( mockAwsSqsClient.deleteMessageBatch(any[DeleteMessageBatchRequest])).thenThrow(new RuntimeException("error")) @@ -357,7 +357,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "ignore batch of messages" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] val future = // #batch-ignore diff --git a/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala index f93f1b684..8677c6d25 100644 --- a/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala +++ b/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala @@ -256,7 +256,7 @@ class SqsSourceSpec extends AnyFlatSpec with ScalaFutures with Matchers with Def val customClient: SdkAsyncHttpClient = PekkoHttpClient.builder().withActorSystem(system).build() // #init-custom-client - implicit val customSqsClient = SqsAsyncClient + implicit val customSqsClient: SqsAsyncClient = SqsAsyncClient .builder() .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("x", "x"))) // #init-custom-client diff --git a/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala b/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala index ad410bdbb..2502ff912 100644 --- a/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala +++ b/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala @@ -32,7 +32,7 @@ import org.scalatest.BeforeAndAfterAll import scala.collection.immutable import scala.concurrent.duration.DurationInt -import scala.concurrent.{ 
Await, Future } +import scala.concurrent.{ Await, ExecutionContext, Future } //#event-source import org.apache.pekko import pekko.http.scaladsl.Http @@ -91,7 +91,7 @@ object EventSourceSpec { import Server._ import context.dispatcher - private implicit val sys = context.system + private implicit val sys: ActorSystem = context.system context.system.scheduler.scheduleOnce(1.second, self, Bind) @@ -147,8 +147,8 @@ object EventSourceSpec { final class EventSourceSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll { import EventSourceSpec._ - private implicit val system = ActorSystem() - private implicit val ec = system.dispatcher + private implicit val system: ActorSystem = ActorSystem() + private implicit val ec: ExecutionContext = system.dispatcher "EventSource" should { "communicate correctly with an instable HTTP server" in { diff --git a/udp/src/test/scala/docs/scaladsl/UdpSpec.scala b/udp/src/test/scala/docs/scaladsl/UdpSpec.scala index 9cdcb3bb1..8f58affa6 100644 --- a/udp/src/test/scala/docs/scaladsl/UdpSpec.scala +++ b/udp/src/test/scala/docs/scaladsl/UdpSpec.scala @@ -41,8 +41,8 @@ class UdpSpec with BeforeAndAfterAll with LogCapturing { - implicit val mat = Materializer(system) - implicit val pat = PatienceConfig(3.seconds, 50.millis) + implicit val mat: Materializer = Materializer(system) + implicit val pat: PatienceConfig = PatienceConfig(3.seconds, 50.millis) // #bind-address val bindToLocal = new InetSocketAddress("localhost", 0) diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala index 4d6c7919a..7927d922b 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala @@ -140,7 +140,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends UnixDomainSock backlog: Int = 128, halfClose: Boolean = false): Future[ServerBinding] = bind(path, backlog, halfClose) - .to(Sink.foreach { conn: IncomingConnection => + .to(Sink.foreach { (conn: IncomingConnection) => conn.flow.join(handler).run() }) .run() From f23710ac4c7e05d0961f4eec44df8bac288a1cf1 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 30 May 2023 20:55:16 +0100 Subject: [PATCH 15/90] Scala3 couchbase support (#128) * initial work on scala3 support for couchbase * Update model.scala * Update CouchbaseFlow.scala * use jackson in test code instead of play-json * Update Dependencies.scala --- .../stream/connectors/couchbase/model.scala | 9 +++++---- .../couchbase/scaladsl/CouchbaseFlow.scala | 16 +++++++++------- .../couchbase/testing/CouchbaseSupport.scala | 15 +++++++++++---- project/Dependencies.scala | 13 +++++++------ 4 files changed, 32 insertions(+), 21 deletions(-) diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala index fdbd08ff5..b40e87582 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/model.scala @@ -270,7 +270,8 @@ sealed trait CouchbaseWriteResult[T <: Document[_]] { /** * Emitted for a successful Couchbase write operation. 
*/ -final case class CouchbaseWriteSuccess[T <: Document[_]] private (override val doc: T) extends CouchbaseWriteResult[T] { +final case class CouchbaseWriteSuccess[T <: Document[_]] private[couchbase] ( + override val doc: T) extends CouchbaseWriteResult[T] { val isSuccess: Boolean = true val isFailure: Boolean = false } @@ -278,7 +279,7 @@ final case class CouchbaseWriteSuccess[T <: Document[_]] private (override val d /** * Emitted for a failed Couchbase write operation. */ -final case class CouchbaseWriteFailure[T <: Document[_]] private (override val doc: T, failure: Throwable) +final case class CouchbaseWriteFailure[T <: Document[_]] private[couchbase] (override val doc: T, failure: Throwable) extends CouchbaseWriteResult[T] { val isSuccess: Boolean = false val isFailure: Boolean = true @@ -296,7 +297,7 @@ sealed trait CouchbaseDeleteResult { /** * Emitted for a successful Couchbase write operation. */ -final case class CouchbaseDeleteSuccess private (override val id: String) extends CouchbaseDeleteResult { +final case class CouchbaseDeleteSuccess private[couchbase] (override val id: String) extends CouchbaseDeleteResult { val isSuccess: Boolean = true val isFailure: Boolean = false } @@ -304,7 +305,7 @@ final case class CouchbaseDeleteSuccess private (override val id: String) extend /** * Emitted for a failed Couchbase write operation. */ -final case class CouchbaseDeleteFailure private (override val id: String, failure: Throwable) +final case class CouchbaseDeleteFailure private[couchbase] (override val id: String, failure: Throwable) extends CouchbaseDeleteResult { val isSuccess: Boolean = false val isFailure: Boolean = true diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala index 6a35d171a..ad35b1cee 100644 --- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala +++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala @@ -29,7 +29,7 @@ import pekko.stream.connectors.couchbase.{ import pekko.stream.scaladsl.Flow import com.couchbase.client.java.document.{ Document, JsonDocument } -import scala.concurrent.ExecutionContext +import scala.concurrent.{ ExecutionContext, Future } /** * Scala API: Factory methods for Couchbase flows. @@ -100,8 +100,8 @@ object CouchbaseFlow { */ def upsertDocWithResult[T <: Document[_]](sessionSettings: CouchbaseSessionSettings, writeSettings: CouchbaseWriteSettings, - bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] = - Flow + bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] = { + val flow: Flow[T, CouchbaseWriteResult[T], Future[NotUsed]] = Flow .fromMaterializer { (materializer, _) => val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName) Flow[T] @@ -115,7 +115,8 @@ object CouchbaseFlow { } }) } - .mapMaterializedValue(_ => NotUsed) + flow.mapMaterializedValue(_ => NotUsed) + } /** * Create a flow to replace a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]]. 
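// A minimal sketch of the pattern applied in the hunk above, assuming only pekko-stream on the
// classpath (the method and type-parameter names here are illustrative, not from the connector):
// Flow.fromMaterializer materializes to a Future, so the patch annotates the intermediate flow
// explicitly and then calls mapMaterializedValue(_ => NotUsed) to keep the public materialized
// type unchanged under Scala 3's stricter inference.
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Flow
import scala.concurrent.Future

def explicitMatValueSketch[T]: Flow[T, T, NotUsed] = {
  // fromMaterializer wraps the factory's materialized value in a Future[NotUsed]
  val flow: Flow[T, T, Future[NotUsed]] =
    Flow.fromMaterializer { (_, _) => Flow[T] }
  // discard the Future and expose NotUsed, mirroring the change to upsertDocWithResult above
  flow.mapMaterializedValue(_ => NotUsed)
}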
@@ -153,8 +154,8 @@ object CouchbaseFlow { */ def replaceDocWithResult[T <: Document[_]](sessionSettings: CouchbaseSessionSettings, writeSettings: CouchbaseWriteSettings, - bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] = - Flow + bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] = { + val flow: Flow[T, CouchbaseWriteResult[T], Future[NotUsed]] = Flow .fromMaterializer { (materializer, _) => val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName) Flow[T] @@ -168,7 +169,8 @@ object CouchbaseFlow { } }) } - .mapMaterializedValue(_ => NotUsed) + flow.mapMaterializedValue(_ => NotUsed) + } /** * Create a flow to delete documents from Couchbase by `id`. Emits the same `id`. diff --git a/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala b/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala index e517dfe39..808413f72 100644 --- a/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala +++ b/couchbase/src/test/scala/org/apache/pekko/stream/connectors/couchbase/testing/CouchbaseSupport.scala @@ -25,8 +25,9 @@ import com.couchbase.client.deps.io.netty.util.CharsetUtil import com.couchbase.client.java.ReplicateTo import com.couchbase.client.java.document.json.JsonObject import com.couchbase.client.java.document.{ BinaryDocument, JsonDocument, RawJsonDocument, StringDocument } +import com.fasterxml.jackson.databind.json.JsonMapper +import com.fasterxml.jackson.module.scala.DefaultScalaModule import org.slf4j.LoggerFactory -import play.api.libs.json.Json import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext.Implicits.global @@ -35,6 +36,12 @@ import scala.concurrent.{ Await, Future } case class TestObject(id: String, value: String) +private[couchbase] object CouchbaseSupport { + val jacksonMapper = JsonMapper.builder() + .addModule(DefaultScalaModule) + .build() +} + trait CouchbaseSupport { private val log = LoggerFactory.getLogger(classOf[CouchbaseSupport]) @@ -64,7 +71,7 @@ trait CouchbaseSupport { } def toRawJsonDocument(testObject: TestObject): RawJsonDocument = { - val json = Json.toJson(testObject)(Json.writes[TestObject]).toString() + val json = CouchbaseSupport.jacksonMapper.writeValueAsString(testObject) RawJsonDocument.create(testObject.id, json) } @@ -72,12 +79,12 @@ trait CouchbaseSupport { JsonDocument.create(testObject.id, JsonObject.create().put("id", testObject.id).put("value", testObject.value)) def toStringDocument(testObject: TestObject): StringDocument = { - val json = Json.toJson(testObject)(Json.writes[TestObject]).toString() + val json = CouchbaseSupport.jacksonMapper.writeValueAsString(testObject) StringDocument.create(testObject.id, json) } def toBinaryDocument(testObject: TestObject): BinaryDocument = { - val json = Json.toJson(testObject)(Json.writes[TestObject]).toString() + val json = CouchbaseSupport.jacksonMapper.writeValueAsString(testObject) val toWrite = Unpooled.copiedBuffer(json, CharsetUtil.UTF_8) BinaryDocument.create(testObject.id, toWrite) } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 458601ffa..f711adcd3 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -120,13 +120,14 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided)) val Couchbase = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( - "com.couchbase.client" % 
"java-client" % CouchbaseVersion, - "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", - "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided, - "com.typesafe.play" %% "play-json" % "2.9.2" % Test, - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion % Test)) + "com.couchbase.client" % "java-client" % CouchbaseVersion, // ApacheV2 + "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", // ApacheV2 + "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided, // Apache V2 + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion % Test, // Apache V2 + "com.fasterxml.jackson.core" % "jackson-databind" % JacksonDatabindVersion % Test, // Apache V2 + "com.fasterxml.jackson.module" %% "jackson-module-scala" % JacksonDatabindVersion % Test // Apache V2 + )) val `Doc-examples` = Seq( libraryDependencies ++= Seq( From 7f3a29a6e1f4c79eeb9fb0838a51ee46dd2279cf Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Wed, 31 May 2023 08:23:09 +0100 Subject: [PATCH 16/90] More scala3 work (cassandra / azure) (#130) * scala3 support for cassandra connector * try azure connector --- .../azure/storagequeue/impl/AzureQueueSourceStage.scala | 2 +- .../connectors/cassandra/javadsl/CassandraSessionSpec.scala | 2 +- .../cassandra/scaladsl/CassandraSessionPerformanceSpec.scala | 2 +- project/Dependencies.scala | 2 -- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala index 1ba78bb51..ff882cc00 100644 --- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala +++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/impl/AzureQueueSourceStage.scala @@ -68,7 +68,7 @@ import scala.collection.mutable.Queue setHandler( out, new OutHandler { - override def onPull: Unit = + override def onPull(): Unit = if (!buffer.isEmpty) { push(out, buffer.dequeue()) } else { diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala index 210c60321..0aeec5fe1 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSessionSpec.scala @@ -38,7 +38,7 @@ import scala.concurrent.duration._ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("CassandraSessionSpec")) { - val log = Logging(system, this.getClass) + val log = Logging(system, classOf[CassandraSessionSpec]) val javadslSessionRegistry = javadsl.CassandraSessionRegistry.get(system) val data = 1 until 103 diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala index da0943efc..00fb120c6 100644 --- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala +++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala @@ -25,7 +25,7 @@ import 
scala.concurrent.duration._ final class CassandraSessionPerformanceSpec extends CassandraSpecBase(ActorSystem("CassandraSessionPerformanceSpec")) { - val log = Logging(system, this.getClass) + val log = Logging(system, classOf[CassandraSessionPerformanceSpec]) val data = 1 to 5 * 1000 * 1000 diff --git a/project/Dependencies.scala b/project/Dependencies.scala index f711adcd3..0989ee015 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -102,7 +102,6 @@ object Dependencies { ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) val AzureStorageQueue = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.microsoft.azure" % "azure-storage" % "8.0.0")) @@ -111,7 +110,6 @@ object Dependencies { val CassandraDriverVersionInDocs = "4.15" val Cassandra = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("com.datastax.oss" % "java-driver-core" % CassandraDriverVersion) .exclude("com.github.spotbugs", "spotbugs-annotations") From fc4417af82c7c22439cf6f7e5a1e87298d0e2107 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Thu, 8 Jun 2023 15:59:33 +0100 Subject: [PATCH 17/90] support scala3 (google-common) (#141) * support scala3 (google-common) * Update ResumableUpload.scala * add huawei * Update Dependencies.scala * Update BigQuery.scala * annotation --- .../bigquery/BigQueryException.scala | 2 +- .../bigquery/javadsl/BigQuery.scala | 29 ++++--- .../googlecloud/pubsub/impl/PubSubApi.scala | 6 +- .../googlecloud/storage/impl/Formats.scala | 12 ++- .../storage/impl/GCStorageStream.scala | 4 +- .../storage/WithMaterializerGlobal.scala | 2 +- .../impl/GCStorageStreamIntegrationSpec.scala | 2 +- .../google/javadsl/XUploadContentType.java | 3 +- .../connectors/google/GoogleSettings.scala | 6 +- .../connectors/google/ResumableUpload.scala | 58 ++++++------- .../connectors/google/auth/Credentials.scala | 2 +- .../connectors/google/auth/GoogleOAuth2.scala | 3 +- .../google/auth/GoogleOAuth2Exception.scala | 3 +- .../connectors/google/http/GoogleHttp.scala | 10 +-- .../connectors/google/javadsl/Google.scala | 4 +- .../connectors/google/jwt/JwtSprayJson.scala | 81 +++++++++++++++++++ .../scaladsl/`X-Upload-Content-Type`.scala | 2 +- .../google/ResumableUploadSpec.scala | 4 +- .../google/auth/GoogleOAuth2Spec.scala | 2 +- .../google/auth/OAuth2CredentialsSpec.scala | 2 +- .../google/http/GoogleHttpSpec.scala | 5 +- .../google/firebase/fcm/impl/FcmSender.scala | 2 +- .../firebase/fcm/v1/impl/FcmSender.scala | 2 +- .../firebase/fcm/v1/impl/FcmSenderSpec.scala | 2 +- .../pushkit/impl/PushKitSenderSpec.scala | 2 +- .../docs/scaladsl/MqttActorSystemsSpec.scala | 6 +- project/Dependencies.scala | 13 ++- 27 files changed, 181 insertions(+), 88 deletions(-) create mode 100644 google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala index 426f1dd8d..d0ea423d3 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala @@ -39,7 +39,7 @@ object BigQueryException { implicit val fromResponseUnmarshaller: FromResponseUnmarshaller[Throwable] = Unmarshaller - .withMaterializer { 
implicit ec => implicit mat => response: HttpResponse => + .withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => import SprayJsonSupport._ val HttpResponse(status, _, entity, _) = response: @nowarn("msg=match may not be exhaustive") Unmarshaller diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala index 6e9dd836f..109811e7f 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/javadsl/BigQuery.scala @@ -19,7 +19,7 @@ import pekko.annotation.ApiMayChange import pekko.http.javadsl.marshalling.Marshaller import pekko.http.javadsl.model.{ HttpEntity, RequestEntity } import pekko.http.javadsl.unmarshalling.Unmarshaller -import pekko.http.scaladsl.{ model => sm } +import pekko.http.scaladsl.{ marshalling, model => sm, unmarshalling } import pekko.japi.Pair import pekko.stream.connectors.google.GoogleSettings import pekko.stream.connectors.google.javadsl.Google @@ -45,7 +45,6 @@ import pekko.util.OptionConverters._ import java.time.Duration import java.util.concurrent.CompletionStage import java.{ lang, util } - import scala.annotation.nowarn import scala.concurrent.duration.{ FiniteDuration, MILLISECONDS } @@ -220,7 +219,8 @@ object BigQuery extends Google { selectedFields: util.List[String], unmarshaller: Unmarshaller[HttpEntity, TableDataListResponse[Out]]) : Source[Out, CompletionStage[TableDataListResponse[Out]]] = { - implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] + implicit val um: unmarshalling.Unmarshaller[sm.HttpEntity, TableDataListResponse[Out]] = + unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery .tableData(datasetId, tableId, startIndex.toScala, maxResults.toScala, selectedFields.asScala.toList) .mapMaterializedValue(_.asJava) @@ -245,7 +245,8 @@ object BigQuery extends Google { retryPolicy: InsertAllRetryPolicy, templateSuffix: util.Optional[String], marshaller: Marshaller[TableDataInsertAllRequest[In], RequestEntity]): Sink[util.List[In], NotUsed] = { - implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] + implicit val m: marshalling.Marshaller[TableDataInsertAllRequest[In], sm.RequestEntity] = + marshaller.asScalaCastOutput[sm.RequestEntity] ss.Flow[util.List[In]] .map(_.asScala.toList) .to(ScalaBigQuery.insertAll[In](datasetId, tableId, retryPolicy, templateSuffix.toScala)) @@ -269,7 +270,8 @@ object BigQuery extends Google { retryFailedRequests: Boolean, marshaller: Marshaller[TableDataInsertAllRequest[In], RequestEntity]) : Flow[TableDataInsertAllRequest[In], TableDataInsertAllResponse, NotUsed] = { - implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] + implicit val m: marshalling.Marshaller[TableDataInsertAllRequest[In], sm.RequestEntity] = + marshaller.asScalaCastOutput[sm.RequestEntity] ScalaBigQuery.insertAll[In](datasetId, tableId, retryFailedRequests).asJava } @@ -290,7 +292,8 @@ object BigQuery extends Google { dryRun: Boolean, useLegacySql: Boolean, unmarshaller: Unmarshaller[HttpEntity, QueryResponse[Out]]): Source[Out, CompletionStage[QueryResponse[Out]]] = { - implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] + implicit val um: unmarshalling.Unmarshaller[sm.HttpEntity, QueryResponse[Out]] = + 
unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery.query(query, dryRun, useLegacySql).mapMaterializedValue(_.asJava).asJava } @@ -309,7 +312,8 @@ object BigQuery extends Google { query: QueryRequest, unmarshaller: Unmarshaller[HttpEntity, QueryResponse[Out]]) : Source[Out, Pair[CompletionStage[JobReference], CompletionStage[QueryResponse[Out]]]] = { - implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] + implicit val um: unmarshalling.Unmarshaller[sm.HttpEntity, QueryResponse[Out]] = + unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery .query(query) .mapMaterializedValue { @@ -339,7 +343,8 @@ object BigQuery extends Google { timeout: util.Optional[Duration], location: util.Optional[String], unmarshaller: Unmarshaller[HttpEntity, QueryResponse[Out]]): Source[Out, CompletionStage[QueryResponse[Out]]] = { - implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] + implicit val um: unmarshalling.Unmarshaller[sm.HttpEntity, QueryResponse[Out]] = + unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery .queryResults(jobId, startIndex.toScala, @@ -396,7 +401,7 @@ object BigQuery extends Google { def insertAllAsync[In](datasetId: String, tableId: String, marshaller: Marshaller[In, RequestEntity]): Flow[In, Job, NotUsed] = { - implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] + implicit val m: marshalling.Marshaller[In, sm.RequestEntity] = marshaller.asScalaCastOutput[sm.RequestEntity] ScalaBigQuery.insertAllAsync[In](datasetId, tableId).asJava[In] } @@ -415,7 +420,7 @@ object BigQuery extends Google { tableId: String, labels: util.Optional[util.Map[String, String]], marshaller: Marshaller[In, RequestEntity]): Flow[In, Job, NotUsed] = { - implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] + implicit val m: marshalling.Marshaller[In, sm.RequestEntity] = marshaller.asScalaCastOutput[sm.RequestEntity] ScalaBigQuery.insertAllAsync[In](datasetId, tableId, labels.toScala.map(_.asScala.toMap)).asJava[In] } @@ -436,8 +441,8 @@ object BigQuery extends Google { job: Job, marshaller: Marshaller[Job, RequestEntity], unmarshaller: Unmarshaller[HttpEntity, Job]): Sink[ByteString, CompletionStage[Job]] = { - implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] - implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] + implicit val m: marshalling.Marshaller[Job, sm.RequestEntity] = marshaller.asScalaCastOutput[sm.RequestEntity] + implicit val um: unmarshalling.Unmarshaller[sm.HttpEntity, Job] = unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery.createLoadJob(job).mapMaterializedValue(_.asJava).asJava[ByteString] } diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala index 696406f89..66b1e27a7 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala @@ -173,7 +173,7 @@ private[pubsub] trait PubSubApi { .mapMaterializedValue(_ => NotUsed) private implicit val pullResponseUnmarshaller: FromResponseUnmarshaller[PullResponse] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case StatusCodes.Success(_) if 
response.entity.contentType == ContentTypes.`application/json` => Unmarshal(response.entity).to[PullResponse] @@ -211,7 +211,7 @@ private[pubsub] trait PubSubApi { .mapMaterializedValue(_ => NotUsed) private implicit val acknowledgeResponseUnmarshaller: FromResponseUnmarshaller[Done] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case StatusCodes.Success(_) => response.discardEntityBytes().future @@ -261,7 +261,7 @@ private[pubsub] trait PubSubApi { publish(topic, parallelism, None) private implicit val publishResponseUnmarshaller: FromResponseUnmarshaller[PublishResponse] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case StatusCodes.Success(_) if response.entity.contentType == ContentTypes.`application/json` => Unmarshal(response.entity).to[PublishResponse] diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala index 5c20c3931..ee2da5dcc 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala @@ -49,7 +49,8 @@ object Formats extends DefaultJsonProtocol { domain: String, projectTeam: ProjectTeam, etag: String) - private implicit val ObjectAccessControlsJsonFormat = jsonFormat13(ObjectAccessControls) + private implicit val ObjectAccessControlsJsonFormat: RootJsonFormat[ObjectAccessControls] = + jsonFormat13(ObjectAccessControls.apply) /** * Google API storage response object @@ -79,7 +80,8 @@ object Formats extends DefaultJsonProtocol { timeStorageClassUpdated: String, updated: String) - private implicit val storageObjectReadOnlyJson = jsonFormat18(StorageObjectReadOnlyJson) + private implicit val storageObjectReadOnlyJson: RootJsonFormat[StorageObjectReadOnlyJson] = + jsonFormat18(StorageObjectReadOnlyJson.apply) // private sub class of StorageObjectJson used to workaround 22 field jsonFormat issue private final case class StorageObjectWriteableJson( @@ -98,7 +100,8 @@ object Formats extends DefaultJsonProtocol { temporaryHold: Option[Boolean], acl: Option[List[ObjectAccessControls]]) - private implicit val storageObjectWritableJson = jsonFormat14(StorageObjectWriteableJson) + private implicit val storageObjectWritableJson: RootJsonFormat[StorageObjectWriteableJson] = + jsonFormat14(StorageObjectWriteableJson.apply) private implicit object StorageObjectJsonFormat extends RootJsonFormat[StorageObjectJson] { override def read(value: JsValue): StorageObjectJson = { @@ -175,7 +178,8 @@ object Formats extends DefaultJsonProtocol { } } - private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson.apply) + private implicit val bucketListResultJsonReads: RootJsonFormat[BucketListResultJson] = + jsonFormat4(BucketListResultJson.apply) implicit object RewriteResponseReads extends RootJsonReader[RewriteResponse] { override def read(json: JsValue): RewriteResponse = { diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala 
b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala index 0ba705a5c..b67496ea3 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala @@ -242,7 +242,7 @@ import scala.concurrent.{ ExecutionContext, Future } getBucketPath(bucket) / "o" / objectName implicit def unmarshaller[T: FromEntityUnmarshaller]: Unmarshaller[HttpResponse, T] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response match { case HttpResponse(status, _, entity, _) if status.isSuccess() && !status.isRedirection() => Unmarshal(entity).to[T] @@ -254,7 +254,7 @@ import scala.concurrent.{ ExecutionContext, Future } }.withDefaultRetry implicit def optionUnmarshaller[T: FromEntityUnmarshaller]: Unmarshaller[HttpResponse, Option[T]] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response match { case HttpResponse(status, _, entity, _) if status.isSuccess() && !status.isRedirection() => Unmarshal(entity).to[T].map(Some(_)) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala index eb046720a..787885b91 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala @@ -29,7 +29,7 @@ trait WithMaterializerGlobal with ScalaFutures with IntegrationPatience with Matchers { - implicit val actorSystem = ActorSystem("test") + implicit val actorSystem: ActorSystem = ActorSystem("test") implicit val ec: ExecutionContext = actorSystem.dispatcher override protected def afterAll(): Unit = { diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala index 0715faa16..d891f8241 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala @@ -53,7 +53,7 @@ class GCStorageStreamIntegrationSpec with ScalaFutures with LogCapturing { - private implicit val defaultPatience = + private implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 60.seconds, interval = 60.millis) var folderName: String = _ diff --git a/google-common/src/main/java/org/apache/pekko/stream/connectors/google/javadsl/XUploadContentType.java b/google-common/src/main/java/org/apache/pekko/stream/connectors/google/javadsl/XUploadContentType.java index 6edef7564..10e48b59d 100644 --- a/google-common/src/main/java/org/apache/pekko/stream/connectors/google/javadsl/XUploadContentType.java +++ 
b/google-common/src/main/java/org/apache/pekko/stream/connectors/google/javadsl/XUploadContentType.java @@ -22,7 +22,6 @@ public interface XUploadContentType { ContentType getContentType(); static XUploadContentType create(ContentType contentType) { - return X$minusUpload$minusContent$minusType$.MODULE$.apply( - (org.apache.pekko.http.scaladsl.model.ContentType) contentType); + return X$minusUpload$minusContent$minusType$.MODULE$.apply(contentType.toString()); } } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala index ef91cfd78..6ca40022d 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala @@ -90,7 +90,7 @@ object GoogleSettings { } -final case class GoogleSettings @InternalApi private (projectId: String, +final case class GoogleSettings @InternalApi private[connectors] (projectId: String, credentials: Credentials, requestSettings: RequestSettings) { def getProjectId = projectId @@ -134,7 +134,7 @@ object RequestSettings { apply(userIp.toScala, quotaUser.toScala, prettyPrint, chunkSize, retrySettings, forwardProxy.toScala) } -final case class RequestSettings @InternalApi private ( +final case class RequestSettings @InternalApi private[connectors] ( userIp: Option[String], quotaUser: Option[String], prettyPrint: Boolean, @@ -252,7 +252,7 @@ object ForwardProxy { credentials: Option[BasicHttpCredentials], trustPem: Option[String])(implicit system: ClassicActorSystemProvider): ForwardProxy = { ForwardProxy( - trustPem.fold(Http(system).defaultClientHttpsContext)(ForwardProxyHttpsContext(_)), + trustPem.fold(Http(system.classicSystem).defaultClientHttpsContext)(ForwardProxyHttpsContext(_)), ForwardProxyPoolSettings(scheme, host, port, credentials)(system.classicSystem)) } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index f50c921b9..8dbc91cc2 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -56,7 +56,7 @@ private[connectors] object ResumableUpload { Sink .fromMaterializer { (mat, attr) => import mat.executionContext - implicit val materializer = mat + implicit val materializer: Materializer = mat implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val uploadChunkSize = settings.requestSettings.uploadChunkSize @@ -96,25 +96,24 @@ private[connectors] object ResumableUpload { private def initiateSession(request: HttpRequest)(implicit mat: Materializer, settings: GoogleSettings): Future[Uri] = { - implicit val system: ActorSystem = mat.system import implicits._ - implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => - response.discardEntityBytes().future.map { _ => - response.header[Location].fold(throw InvalidResponseException(ErrorInfo("No Location header")))(_.uri) - } - }.withDefaultRetry + implicit val um: FromResponseUnmarshaller[Uri] = + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + response.discardEntityBytes().future.map { _ => + 
response.header[Location].fold(throw InvalidResponseException(ErrorInfo("No Location header")))(_.uri) + } + }.withDefaultRetry - GoogleHttp().singleAuthenticatedRequest[Uri](request) + GoogleHttp(mat.system).singleAuthenticatedRequest[Uri](request) } private final case class DoNotRetry(ex: Throwable) extends Throwable(ex) with NoStackTrace private def uploadChunk[T: FromResponseUnmarshaller]( request: HttpRequest)(implicit mat: Materializer): Flow[Either[T, MaybeLast[Chunk]], Try[Option[T]], NotUsed] = { - implicit val system: ActorSystem = mat.system - val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case PermanentRedirect => response.discardEntityBytes().future.map(_ => None) @@ -127,7 +126,8 @@ private[connectors] object ResumableUpload { val uri = request.uri Flow[HttpRequest] .map((_, ())) - .via(GoogleHttp().cachedHostConnectionPoolWithContext(uri.authority.host.address, uri.effectivePort)(um)) + .via(GoogleHttp(mat.system).cachedHostConnectionPoolWithContext(uri.authority.host.address, uri.effectivePort)( + um)) .map(_._1.recoverWith { case DoNotRetry(ex) => Failure(ex) }) } @@ -147,30 +147,30 @@ private[connectors] object ResumableUpload { request: HttpRequest, chunk: Future[MaybeLast[Chunk]])( implicit mat: Materializer, settings: GoogleSettings): Future[Either[T, MaybeLast[Chunk]]] = { - implicit val system: ActorSystem = mat.system import implicits._ - implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => - response.status match { - case OK | Created => Unmarshal(response).to[T].map(Left(_)) - case PermanentRedirect => - response.discardEntityBytes().future.map { _ => - Right( - response - .header[Range] - .flatMap(_.ranges.headOption) - .collect { - case Slice(_, last) => last + 1 - }.getOrElse(0L)) - } - case _ => throw InvalidResponseException(ErrorInfo(response.status.value, response.status.defaultMessage)) - } - }.withDefaultRetry + implicit val um: FromResponseUnmarshaller[Either[T, Long]] = + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + response.status match { + case OK | Created => Unmarshal(response).to[T].map(Left(_)) + case PermanentRedirect => + response.discardEntityBytes().future.map { _ => + Right( + response + .header[Range] + .flatMap(_.ranges.headOption) + .collect { + case Slice(_, last) => last + 1 + }.getOrElse(0L)) + } + case _ => throw InvalidResponseException(ErrorInfo(response.status.value, response.status.defaultMessage)) + } + }.withDefaultRetry import mat.executionContext chunk.flatMap { case maybeLast @ MaybeLast(Chunk(bytes, position)) => - GoogleHttp() + GoogleHttp(mat.system) .singleAuthenticatedRequest[Either[T, Long]](request.addHeader(statusRequestHeader)) .map { case Left(result) if maybeLast.isLast => Left(result) diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala index 1bb2fd350..72c0e0226 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/Credentials.scala @@ -36,7 +36,7 @@ object Credentials { */ def apply(c: Config)(implicit system: ClassicActorSystemProvider): Credentials = 
c.getString("provider") match { case "application-default" => - val log = Logging(system.classicSystem, getClass) + val log = Logging(system.classicSystem, classOf[Credentials]) try { val creds = parseServiceAccount(c) log.info("Using service account credentials") diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala index 4099f91c1..4455748c4 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala @@ -21,9 +21,10 @@ import pekko.http.scaladsl.model.HttpMethods.POST import pekko.http.scaladsl.model.{ FormData, HttpRequest } import pekko.stream.Materializer import pekko.stream.connectors.google.http.GoogleHttp +import pekko.stream.connectors.google.jwt.JwtSprayJson import pekko.stream.connectors.google.{ implicits, RequestSettings } import pdi.jwt.JwtAlgorithm.RS256 -import pdi.jwt.{ JwtClaim, JwtSprayJson } +import pdi.jwt.JwtClaim import spray.json.DefaultJsonProtocol._ import spray.json.JsonFormat diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala index ae9690008..9c88c182f 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala @@ -22,7 +22,8 @@ import pekko.stream.connectors.google.util.Retry import spray.json.DefaultJsonProtocol._ import spray.json.RootJsonFormat -final case class GoogleOAuth2Exception private (override val info: ErrorInfo) extends ExceptionWithErrorInfo(info) +final case class GoogleOAuth2Exception private[google] (override val info: ErrorInfo) + extends ExceptionWithErrorInfo(info) private[google] object GoogleOAuth2Exception { diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala index d536f8b00..0bafea003 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala @@ -14,7 +14,7 @@ package org.apache.pekko.stream.connectors.google.http import org.apache.pekko -import pekko.actor.ClassicActorSystemProvider +import pekko.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Scheduler } import pekko.annotation.InternalApi import pekko.dispatch.ExecutionContexts import pekko.http.scaladsl.Http.HostConnectionPool @@ -26,7 +26,7 @@ import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings, Reques import pekko.stream.connectors.google.util.Retry import pekko.stream.scaladsl.{ Flow, FlowWithContext, Keep, RetryFlow } -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContextExecutor, Future } import scala.util.{ Failure, Success, Try } @InternalApi @@ -45,9 +45,9 @@ private[connectors] object GoogleHttp { @InternalApi private[connectors] final class GoogleHttp private (val http: HttpExt) extends AnyVal { - private implicit def system = http.system - private implicit def ec = system.dispatcher - private implicit def 
scheduler = system.scheduler + private implicit def system: ExtendedActorSystem = http.system + private implicit def ec: ExecutionContextExecutor = system.dispatcher + private implicit def scheduler: Scheduler = system.scheduler /** * Sends a single [[HttpRequest]] and returns the raw [[HttpResponse]]. diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala index 20785a9fa..1325bced9 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/javadsl/Google.scala @@ -18,7 +18,7 @@ import pekko.NotUsed import pekko.actor.ClassicActorSystemProvider import pekko.http.javadsl.model.{ HttpRequest, HttpResponse } import pekko.http.javadsl.unmarshalling.Unmarshaller -import pekko.http.scaladsl.{ model => sm } +import pekko.http.scaladsl.{ model => sm, unmarshalling } import pekko.stream.connectors.google.GoogleSettings import pekko.stream.connectors.google.scaladsl.{ Google => ScalaGoogle } import pekko.stream.javadsl.{ Sink, Source } @@ -59,7 +59,7 @@ private[connectors] trait Google { */ final def paginatedRequest[Out <: Paginated](request: HttpRequest, unmarshaller: Unmarshaller[HttpResponse, Out]): Source[Out, NotUsed] = { - implicit val um = unmarshaller.asScala + implicit val um: unmarshalling.Unmarshaller[HttpResponse, Out] = unmarshaller.asScala ScalaGoogle.paginatedRequest[Out](request).asJava } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala new file mode 100644 index 000000000..8362fa88e --- /dev/null +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +package org.apache.pekko.stream.connectors.google.jwt + +import java.time.Clock +import org.apache.pekko +import pekko.annotation.InternalApi +import pdi.jwt._ +import pdi.jwt.exceptions.JwtNonStringException +import spray.json._ + +/** + * Implementation of `JwtCore` using `JsObject` from spray-json. 
+ */ +@InternalApi +private[google] trait JwtSprayJsonParser[H, C] extends JwtJsonCommon[JsObject, H, C] { + protected def parse(value: String): JsObject = value.parseJson.asJsObject + + protected def stringify(value: JsObject): String = value.compactPrint + + protected def getAlgorithm(header: JsObject): Option[JwtAlgorithm] = + header.fields.get("alg").flatMap { + case JsString("none") => None + case JsString(algo) => Option(JwtAlgorithm.fromString(algo)) + case JsNull => None + case _ => throw new JwtNonStringException("alg") + } + +} + +@InternalApi +private[google] object JwtSprayJson extends JwtSprayJsonParser[JwtHeader, JwtClaim] { + import DefaultJsonProtocol._ + + def apply(clock: Clock): JwtSprayJson = new JwtSprayJson(clock) + + override def parseHeader(header: String): JwtHeader = { + val jsObj = parse(header) + JwtHeader( + algorithm = getAlgorithm(jsObj), + typ = safeGetField[String](jsObj, "typ"), + contentType = safeGetField[String](jsObj, "cty"), + keyId = safeGetField[String](jsObj, "kid")) + } + + override def parseClaim(claim: String): JwtClaim = { + val jsObj = parse(claim) + val content = JsObject( + jsObj.fields - "iss" - "sub" - "aud" - "exp" - "nbf" - "iat" - "jti") + JwtClaim( + content = stringify(content), + issuer = safeGetField[String](jsObj, "iss"), + subject = safeGetField[String](jsObj, "sub"), + audience = safeGetField[Set[String]](jsObj, "aud") + .orElse(safeGetField[String](jsObj, "aud").map(s => Set(s))), + expiration = safeGetField[Long](jsObj, "exp"), + notBefore = safeGetField[Long](jsObj, "nbf"), + issuedAt = safeGetField[Long](jsObj, "iat"), + jwtId = safeGetField[String](jsObj, "jti")) + } + + private[this] def safeRead[A: JsonReader](js: JsValue) = + safeReader[A].read(js).fold(_ => None, a => Option(a)) + + private[this] def safeGetField[A: JsonReader](js: JsObject, name: String) = + js.fields.get(name).flatMap(safeRead[A]) +} + +@InternalApi +private[google] class JwtSprayJson private (override val clock: Clock) + extends JwtSprayJsonParser[JwtHeader, JwtClaim] { + override def parseHeader(header: String): JwtHeader = JwtSprayJson.parseHeader(header) + override def parseClaim(header: String): JwtClaim = JwtSprayJson.parseClaim(header) +} diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala index 291a26ab0..e7b85c461 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/scaladsl/`X-Upload-Content-Type`.scala @@ -34,7 +34,7 @@ object `X-Upload-Content-Type` extends ModeledCustomHeaderCompanion[`X-Upload-Co contentType => Success(`X-Upload-Content-Type`(contentType))) } -final case class `X-Upload-Content-Type` private (contentType: ContentType) +final case class `X-Upload-Content-Type` private[connectors] (contentType: ContentType) extends ModeledCustomHeader[`X-Upload-Content-Type`] with XUploadContentType { override def value(): String = contentType.toString() diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala index 819834bb8..bca269015 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala +++ 
b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala @@ -18,7 +18,7 @@ import pekko.actor.ActorSystem import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._ import pekko.http.scaladsl.model.HttpMethods.POST import pekko.http.scaladsl.model.{ ContentTypes, HttpRequest, Uri } -import pekko.http.scaladsl.unmarshalling.Unmarshaller +import pekko.http.scaladsl.unmarshalling.{ FromResponseUnmarshaller, Unmarshaller } import pekko.stream.connectors.google.scaladsl.`X-Upload-Content-Type` import pekko.stream.scaladsl.Source import pekko.testkit.TestKit @@ -79,7 +79,7 @@ class ResumableUploadSpec .willReturn(created().header("Content-Type", "application/json").body("{}")))) import implicits._ - implicit val um = + implicit val um: FromResponseUnmarshaller[JsValue] = Unmarshaller.messageUnmarshallerFromEntityUnmarshaller(sprayJsValueUnmarshaller).withDefaultRetry val result = Source diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala index 1bd56b790..753e3004a 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala @@ -46,7 +46,7 @@ class GoogleOAuth2Spec implicit val executionContext: ExecutionContext = system.dispatcher implicit val settings: GoogleSettings = GoogleSettings(system) - implicit val clock = Clock.systemUTC() + implicit val clock: Clock = Clock.systemUTC() lazy val privateKey = { val inputStream = getClass.getClassLoader.getResourceAsStream("private_pcks8.pem") diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala index 7d7e2342a..446336dd9 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala @@ -45,7 +45,7 @@ class OAuth2CredentialsSpec import system.dispatcher implicit val settings: RequestSettings = GoogleSettings().requestSettings - implicit val clock = Clock.systemUTC() + implicit val clock: Clock = Clock.systemUTC() final object AccessTokenProvider { @volatile var accessTokenPromise: Promise[AccessToken] = Promise.failed(new RuntimeException) diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala index 56a75389c..cc37d94f2 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala @@ -67,10 +67,11 @@ class GoogleHttpSpec anyInt, any[HttpsConnectionContext], any[ConnectionPoolSettings], - any[LoggingAdapter])).thenReturn(Flow[Any] + any[LoggingAdapter])).thenReturn( + Flow[Any] .zipWith(response)(Keep.right) .map(Try(_)) - .map((_, mock[Nothing])) + .map((_, mock[Nothing](scala.reflect.ClassTag.Nothing))) .mapMaterializedValue(_ => mock[HostConnectionPool]), Nil: _*): @nowarn("msg=dead code") http diff --git 
a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala index 3a366face..859543def 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmSender.scala @@ -55,7 +55,7 @@ private[fcm] class FcmSender { } implicit private val unmarshaller: FromResponseUnmarshaller[FcmSuccessResponse] = Unmarshaller.withMaterializer { - implicit ec => implicit mat => response: HttpResponse => + implicit ec => implicit mat => (response: HttpResponse) => if (response.status.isSuccess) { Unmarshal(response.entity).to[FcmSuccessResponse] } else { diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala index 479855e10..66e96560c 100644 --- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala +++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSender.scala @@ -50,7 +50,7 @@ private[fcm] class FcmSender { } implicit private val unmarshaller: FromResponseUnmarshaller[FcmSuccessResponse] = Unmarshaller.withMaterializer { - implicit ec => implicit mat => response: HttpResponse => + implicit ec => implicit mat => (response: HttpResponse) => if (response.status.isSuccess) { Unmarshal(response.entity).to[FcmSuccessResponse] } else { diff --git a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala index a2d4d4922..ca6c18820 100644 --- a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala +++ b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala @@ -57,7 +57,7 @@ class FcmSenderSpec implicit val executionContext: ExecutionContext = system.dispatcher - implicit val conf = FcmSettings() + implicit val conf: FcmSettings = FcmSettings() implicit val settings: GoogleSettings = GoogleSettings().copy(projectId = "projectId") "FcmSender" should { diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala index cc83e885f..16a7f78ce 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala @@ -55,7 +55,7 @@ class PushKitSenderSpec implicit val executionContext: ExecutionContext = system.dispatcher - implicit val config = HmsSettings() + implicit val config: HmsSettings = HmsSettings() "HmsSender" should { diff --git a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttActorSystemsSpec.scala b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttActorSystemsSpec.scala index ccf6e788a..301dde93c 100644 --- a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttActorSystemsSpec.scala +++ b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttActorSystemsSpec.scala 
@@ -14,6 +14,7 @@ package docs.scaladsl import org.apache.pekko +import pekko.actor import pekko.actor.typed.scaladsl.Behaviors import pekko.stream.connectors.mqtt.streaming.MqttSessionSettings import pekko.stream.connectors.mqtt.streaming.scaladsl.{ ActorMqttClientSession, ActorMqttServerSession } @@ -21,7 +22,8 @@ import org.scalatest.wordspec.AnyWordSpec class MqttTypedActorSystemSpec extends AnyWordSpec { - implicit val actorSystem = pekko.actor.typed.ActorSystem(Behaviors.ignore, "MqttTypedActorSystemSpec") + implicit val actorSystem: actor.typed.ActorSystem[Nothing] = + actor.typed.ActorSystem(Behaviors.ignore, "MqttTypedActorSystemSpec") "A typed actor system" should { "allow client creation" in { @@ -41,7 +43,7 @@ class MqttTypedActorSystemSpec extends AnyWordSpec { class MqttClassicActorSystemSpec extends AnyWordSpec { - implicit val actorSystem = pekko.actor.ActorSystem("MqttClassicActorSystemSpec") + implicit val actorSystem: actor.ActorSystem = actor.ActorSystem("MqttClassicActorSystemSpec") "A typed actor system" should { "allow client creation" in { diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 0989ee015..8006d1e7d 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -186,13 +186,13 @@ object Dependencies { "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies) val GoogleCommon = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.jwt-scala" %% "jwt-spray-json" % "7.1.4", - "com.google.auth" % "google-auth-library-credentials" % "0.24.1", - "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) + "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5", // ApacheV2 + "com.google.auth" % "google-auth-library-credentials" % "0.24.1", // BSD 3-clause + "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 + ) ++ Mockito) val GoogleBigQuery = Seq( crossScalaVersions -= Scala3, @@ -239,7 +239,6 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) val GoogleFcm = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion) ++ Mockito) @@ -283,11 +282,11 @@ object Dependencies { "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val HuaweiPushKit = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.jwt-scala" %% "jwt-spray-json" % "7.1.4") ++ Mockito) + "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5" // ApacheV2 + ) ++ Mockito) val InfluxDB = Seq( libraryDependencies ++= Seq( From e0202ad934f8a218bcded64835fb2ab99ead6e6f Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Thu, 8 Jun 2023 22:00:41 +0100 Subject: [PATCH 18/90] Scala3 support for more google connectors (#143) * support scala3 (google-common) * support scala3 on more google connectors * continue * imports * more implicit issues * Update BigQueryCollectionFormats.scala * further issues * class access issues * Update BigQueryRestBasicFormats.scala * use compactPrint because toString causes tests to fail in Scala3 * review issue --- .../scaladsl/ElasticsearchFlow.scala | 2 +- .../docs/scaladsl/ElasticsearchV5Spec.scala | 6 ++-- 
.../docs/scaladsl/ElasticsearchV7Spec.scala | 6 ++-- .../docs/scaladsl/OpensearchV1Spec.scala | 6 ++-- .../bigquery/model/DatasetJsonProtocol.scala | 6 ++-- .../model/ErrorProtoJsonProtocol.scala | 3 +- .../bigquery/model/JobJsonProtocol.scala | 21 ++++++++------ .../bigquery/model/QueryJsonProtocol.scala | 9 +++--- .../model/TableDataJsonProtocol.scala | 13 +++++---- .../bigquery/model/TableJsonProtocol.scala | 15 +++++----- .../spray/BigQueryCollectionFormats.scala | 26 +++++++++++------ .../spray/BigQueryRestBasicFormats.scala | 24 ++++++++-------- .../scala/docs/scaladsl/BigQueryDoc.scala | 12 ++++---- .../googlecloud/bigquery/e2e/A.scala | 4 +-- .../e2e/scaladsl/BigQueryEndToEndSpec.scala | 18 ++++++------ .../scaladsl/BigQueryQueriesSpec.scala | 12 ++++---- .../scaladsl/schema/BigQuerySchemasSpec.scala | 10 +++---- .../spray/BigQueryJsonProtocolSpec.scala | 4 +-- .../googlecloud/pubsub/impl/PubSubApi.scala | 28 ++++++++++--------- .../pubsub/impl/PubSubApiSpec.scala | 2 +- .../connectors/google/GoogleSettings.scala | 6 ++-- .../connectors/google/ResumableUpload.scala | 1 - .../google/auth/NoCredentials.scala | 2 +- project/Dependencies.scala | 2 -- 24 files changed, 128 insertions(+), 110 deletions(-) diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala index 463848cb2..3c80a959e 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/scaladsl/ElasticsearchFlow.scala @@ -221,7 +221,7 @@ object ElasticsearchFlow { } private final class SprayJsonWriter[T](implicit writer: JsonWriter[T]) extends MessageWriter[T] { - override def convert(message: T): String = message.toJson.toString() + override def convert(message: T): String = message.toJson.compactPrint } } diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala index 52b27dc11..7bc0bb25b 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala @@ -158,9 +158,9 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt // #string val write: Future[immutable.Seq[WriteResult[String, NotUsed]]] = Source( immutable.Seq( - WriteMessage.createIndexMessage("1", Book("Das Parfum").toJson.toString()), - WriteMessage.createIndexMessage("2", Book("Faust").toJson.toString()), - WriteMessage.createIndexMessage("3", Book("Die unendliche Geschichte").toJson.toString()))).via( + WriteMessage.createIndexMessage("1", Book("Das Parfum").toJson.compactPrint), + WriteMessage.createIndexMessage("2", Book("Faust").toJson.compactPrint), + WriteMessage.createIndexMessage("3", Book("Die unendliche Geschichte").toJson.compactPrint))).via( ElasticsearchFlow.create( constructElasticsearchParams(indexName, "_doc", ApiVersion.V5), settings = baseWriteSettings, diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala index 1d2522454..bccd5afeb 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV7Spec.scala @@ -147,9 +147,9 @@ class 
ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val write: Future[immutable.Seq[WriteResult[String, NotUsed]]] = Source( immutable.Seq( - WriteMessage.createIndexMessage("1", Book("Das Parfum").toJson.toString()), - WriteMessage.createIndexMessage("2", Book("Faust").toJson.toString()), - WriteMessage.createIndexMessage("3", Book("Die unendliche Geschichte").toJson.toString()))).via( + WriteMessage.createIndexMessage("1", Book("Das Parfum").toJson.compactPrint), + WriteMessage.createIndexMessage("2", Book("Faust").toJson.compactPrint), + WriteMessage.createIndexMessage("3", Book("Die unendliche Geschichte").toJson.compactPrint))).via( ElasticsearchFlow.create( constructElasticsearchParams(indexName, "_doc", ApiVersion.V7), settings = baseWriteSettings, diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala index 3ba59ee55..81a093399 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala @@ -163,9 +163,9 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils // #string val write: Future[immutable.Seq[WriteResult[String, NotUsed]]] = Source( immutable.Seq( - WriteMessage.createIndexMessage("1", Book("Das Parfum").toJson.toString()), - WriteMessage.createIndexMessage("2", Book("Faust").toJson.toString()), - WriteMessage.createIndexMessage("3", Book("Die unendliche Geschichte").toJson.toString()))).via( + WriteMessage.createIndexMessage("1", Book("Das Parfum").toJson.compactPrint), + WriteMessage.createIndexMessage("2", Book("Faust").toJson.compactPrint), + WriteMessage.createIndexMessage("3", Book("Die unendliche Geschichte").toJson.compactPrint))).via( ElasticsearchFlow.create( constructElasticsearchParams(indexName, "_doc", OpensearchApiVersion.V1), settings = baseWriteSettings, diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala index 3e0166776..ef9ff12ca 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/DatasetJsonProtocol.scala @@ -32,7 +32,7 @@ import scala.collection.immutable.Seq * @param labels the labels associated with this dataset * @param location the geographic location where the dataset should reside */ -final case class Dataset private (datasetReference: DatasetReference, +final case class Dataset private[bigquery] (datasetReference: DatasetReference, friendlyName: Option[String], labels: Option[Map[String, String]], location: Option[String]) { @@ -87,7 +87,7 @@ object Dataset { * @param datasetId A unique ID for this dataset, without the project name * @param projectId The ID of the project containing this dataset */ -final case class DatasetReference private (datasetId: Option[String], projectId: Option[String]) { +final case class DatasetReference private[bigquery] (datasetId: Option[String], projectId: Option[String]) { def getDatasetId = datasetId.toJava def getProjectId = projectId.toJava @@ -126,7 +126,7 @@ object DatasetReference { * @param nextPageToken a token that can be used to request the next results page * @param datasets an 
array of the dataset resources in the project */ -final case class DatasetListResponse private (nextPageToken: Option[String], datasets: Option[Seq[Dataset]]) { +final case class DatasetListResponse private[bigquery] (nextPageToken: Option[String], datasets: Option[Seq[Dataset]]) { def getNextPageToken = nextPageToken.toJava def getDatasets = datasets.map(_.asJava).toJava diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala index 8aec41cdf..a454b0ae8 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala @@ -31,7 +31,8 @@ import scala.annotation.nowarn * @param location specifies where the error occurred, if present * @param message A human-readable description of the error */ -final case class ErrorProto private (reason: Option[String], location: Option[String], message: Option[String]) { +final case class ErrorProto private[bigquery] (reason: Option[String], location: Option[String], + message: Option[String]) { @nowarn("msg=never used") @JsonCreator diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala index c53722c02..bdd04a5ab 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/JobJsonProtocol.scala @@ -33,7 +33,7 @@ import scala.collection.immutable.Seq * @param jobReference reference describing the unique-per-user name of the job * @param status the status of this job */ -final case class Job private (configuration: Option[JobConfiguration], +final case class Job private[bigquery] (configuration: Option[JobConfiguration], jobReference: Option[JobReference], status: Option[JobStatus]) { @@ -83,7 +83,8 @@ object Job { * @param load configures a load job * @param labels the labels associated with this job */ -final case class JobConfiguration private (load: Option[JobConfigurationLoad], labels: Option[Map[String, String]]) { +final case class JobConfiguration private[bigquery] (load: Option[JobConfigurationLoad], + labels: Option[Map[String, String]]) { def getLoad = load.toJava def getLabels = labels.toJava @@ -144,7 +145,7 @@ object JobConfiguration { * @param writeDisposition specifies the action that occurs if the destination table already exists * @param sourceFormat the format of the data files */ -final case class JobConfigurationLoad private (schema: Option[TableSchema], +final case class JobConfigurationLoad private[bigquery] (schema: Option[TableSchema], destinationTable: Option[TableReference], createDisposition: Option[CreateDisposition], writeDisposition: Option[WriteDisposition], @@ -210,7 +211,7 @@ object JobConfigurationLoad { implicit val configurationLoadFormat: JsonFormat[JobConfigurationLoad] = jsonFormat5(apply) } -final case class CreateDisposition private (value: String) extends StringEnum +final case class CreateDisposition private[bigquery] (value: 
String) extends StringEnum object CreateDisposition { /** @@ -227,7 +228,7 @@ object CreateDisposition { implicit val format: JsonFormat[CreateDisposition] = StringEnum.jsonFormat(apply) } -final case class WriteDisposition private (value: String) extends StringEnum +final case class WriteDisposition private[bigquery] (value: String) extends StringEnum object WriteDisposition { /** @@ -269,7 +270,8 @@ object SourceFormat { * @param jobId the ID of the job * @param location the geographic location of the job */ -final case class JobReference private (projectId: Option[String], jobId: Option[String], location: Option[String]) { +final case class JobReference private[bigquery] (projectId: Option[String], jobId: Option[String], + location: Option[String]) { @nowarn("msg=never used") @JsonCreator @@ -323,7 +325,8 @@ object JobReference { * @param errors the first errors encountered during the running of the job * @param state running state of the job */ -final case class JobStatus private (errorResult: Option[ErrorProto], errors: Option[Seq[ErrorProto]], state: JobState) { +final case class JobStatus private[bigquery] (errorResult: Option[ErrorProto], errors: Option[Seq[ErrorProto]], + state: JobState) { def getErrorResult = errorResult.toJava def getErrors = errors.map(_.asJava).toJava @@ -360,7 +363,7 @@ object JobStatus { implicit val format: JsonFormat[JobStatus] = jsonFormat3(apply) } -final case class JobState private (value: String) extends StringEnum +final case class JobState private[bigquery] (value: String) extends StringEnum object JobState { /** @@ -380,7 +383,7 @@ object JobState { implicit val format: JsonFormat[JobState] = StringEnum.jsonFormat(apply) } -final case class JobCancelResponse private (job: Job) { +final case class JobCancelResponse private[bigquery] (job: Job) { def getJob = job def withJob(job: Job) = copy(job = job) diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala index 56ce06f02..7149d8225 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/QueryJsonProtocol.scala @@ -21,11 +21,10 @@ import pekko.util.ccompat.JavaConverters._ import pekko.util.JavaDurationConverters._ import pekko.util.OptionConverters._ import com.fasterxml.jackson.annotation.{ JsonCreator, JsonIgnoreProperties, JsonProperty } -import spray.json.{ RootJsonFormat, RootJsonReader } +import spray.json.{ JsonFormat, RootJsonFormat, RootJsonReader } import java.time.Duration import java.{ lang, util } - import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance import scala.collection.immutable.Seq @@ -46,7 +45,7 @@ import scala.concurrent.duration.FiniteDuration * @param maximumBytesBilled limits the number of bytes billed for this query * @param requestId a unique user provided identifier to ensure idempotent behavior for queries */ -final case class QueryRequest private (query: String, +final case class QueryRequest private[bigquery] (query: String, maxResults: Option[Int], defaultDataset: Option[DatasetReference], timeout: Option[FiniteDuration], @@ -192,7 +191,7 @@ object QueryRequest { * @tparam T the data model for each row */ @JsonIgnoreProperties(ignoreUnknown = true) -final 
case class QueryResponse[+T] private (schema: Option[TableSchema], +final case class QueryResponse[+T] private[bigquery] (schema: Option[TableSchema], jobReference: JobReference, totalRows: Option[Long], pageToken: Option[String], @@ -329,7 +328,7 @@ object QueryResponse { implicit def reader[T <: AnyRef]( implicit reader: BigQueryRootJsonReader[T]): RootJsonReader[QueryResponse[T]] = { - implicit val format = lift(reader) + implicit val format: JsonFormat[T] = lift(reader) jsonFormat10(QueryResponse[T]) } implicit val paginated: Paginated[QueryResponse[Any]] = _.pageToken diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala index fa0451534..1ca69cc74 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala @@ -38,7 +38,8 @@ import scala.collection.immutable.Seq * @tparam T the data model of each row */ @JsonIgnoreProperties(ignoreUnknown = true) -final case class TableDataListResponse[+T] private (totalRows: Long, pageToken: Option[String], rows: Option[Seq[T]]) { +final case class TableDataListResponse[+T] private[bigquery] (totalRows: Long, pageToken: Option[String], + rows: Option[Seq[T]]) { @nowarn("msg=never used") @JsonCreator @@ -82,7 +83,7 @@ object TableDataListResponse { implicit def reader[T <: AnyRef]( implicit reader: BigQueryRootJsonReader[T]): RootJsonReader[TableDataListResponse[T]] = { - implicit val format = lift(reader) + implicit val format: JsonFormat[T] = lift(reader) jsonFormat3(TableDataListResponse[T]) } implicit val paginated: Paginated[TableDataListResponse[Any]] = _.pageToken @@ -99,7 +100,7 @@ object TableDataListResponse { * @tparam T the data model of each row */ @JsonInclude(Include.NON_NULL) -final case class TableDataInsertAllRequest[+T] private (skipInvalidRows: Option[Boolean], +final case class TableDataInsertAllRequest[+T] private[bigquery] (skipInvalidRows: Option[Boolean], ignoreUnknownValues: Option[Boolean], templateSuffix: Option[String], rows: Seq[Row[T]]) { @@ -179,7 +180,7 @@ object TableDataInsertAllRequest { * @param json the record this row contains * @tparam T the data model of the record */ -final case class Row[+T] private (insertId: Option[String], json: T) { +final case class Row[+T] private[bigquery] (insertId: Option[String], json: T) { def getInsertId = insertId.toJava def getJson = json @@ -212,7 +213,7 @@ object Row { * TableDataInsertAllResponse model * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ -final case class TableDataInsertAllResponse private (insertErrors: Option[Seq[InsertError]]) { +final case class TableDataInsertAllResponse private[bigquery] (insertErrors: Option[Seq[InsertError]]) { def getInsertErrors = insertErrors.map(_.asJava).toJava def withInsertErrors(insertErrors: Option[Seq[InsertError]]) = @@ -239,7 +240,7 @@ object TableDataInsertAllResponse { * InsertError model * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ -final case class InsertError private (index: Int, errors: Option[Seq[ErrorProto]]) { +final case class InsertError 
private[bigquery] (index: Int, errors: Option[Seq[ErrorProto]]) { def getIndex = index def getErrors = errors.map(_.asJava).toJava diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala index d193fb4d7..60bdae7f3 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableJsonProtocol.scala @@ -37,7 +37,7 @@ import scala.collection.immutable.Seq * @param numRows the number of rows of data in this table * @param location the geographic location where the table resides */ -final case class Table private (tableReference: TableReference, +final case class Table private[bigquery] (tableReference: TableReference, labels: Option[Map[String, String]], schema: Option[TableSchema], numRows: Option[Long], @@ -109,7 +109,8 @@ object Table { * @param datasetId the ID of the dataset containing this table * @param tableId the ID of the table */ -final case class TableReference private (projectId: Option[String], datasetId: String, tableId: Option[String]) { +final case class TableReference private[bigquery] (projectId: Option[String], datasetId: String, + tableId: Option[String]) { def getProjectId = projectId.toJava def getDatasetId = datasetId @@ -152,7 +153,7 @@ object TableReference { * * @param fields describes the fields in a table */ -final case class TableSchema private (fields: Seq[TableFieldSchema]) { +final case class TableSchema private[bigquery] (fields: Seq[TableFieldSchema]) { @nowarn("msg=never used") @JsonCreator @@ -200,7 +201,7 @@ object TableSchema { * @param mode the field mode * @param fields describes the nested schema fields if the type property is set to `RECORD` */ -final case class TableFieldSchema private (name: String, +final case class TableFieldSchema private[bigquery] (name: String, `type`: TableFieldSchemaType, mode: Option[TableFieldSchemaMode], fields: Option[Seq[TableFieldSchema]]) { @@ -278,7 +279,7 @@ object TableFieldSchema { jsonFormat(apply, "name", "type", "mode", "fields")) } -final case class TableFieldSchemaType private (value: String) extends StringEnum +final case class TableFieldSchemaType private[bigquery] (value: String) extends StringEnum object TableFieldSchemaType { /** @@ -328,7 +329,7 @@ object TableFieldSchemaType { implicit val format: JsonFormat[TableFieldSchemaType] = StringEnum.jsonFormat(apply) } -final case class TableFieldSchemaMode private (value: String) extends StringEnum +final case class TableFieldSchemaMode private[bigquery] (value: String) extends StringEnum object TableFieldSchemaMode { /** @@ -356,7 +357,7 @@ object TableFieldSchemaMode { * @param tables tables in the requested dataset * @param totalItems the total number of tables in the dataset */ -final case class TableListResponse private (nextPageToken: Option[String], +final case class TableListResponse private[bigquery] (nextPageToken: Option[String], tables: Option[Seq[Table]], totalItems: Option[Int]) { diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala 
b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala index 99552c19b..847498fef 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala +++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryCollectionFormats.scala @@ -44,18 +44,26 @@ trait BigQueryCollectionFormats { import collection.{ immutable => imm } - implicit def immIterableFormat[T: BigQueryJsonFormat] = viaSeq[imm.Iterable[T], T](seq => imm.Iterable(seq: _*)) - implicit def immSeqFormat[T: BigQueryJsonFormat] = viaSeq[imm.Seq[T], T](seq => imm.Seq(seq: _*)) - implicit def immIndexedSeqFormat[T: BigQueryJsonFormat] = viaSeq[imm.IndexedSeq[T], T](seq => imm.IndexedSeq(seq: _*)) - implicit def immLinearSeqFormat[T: BigQueryJsonFormat] = viaSeq[imm.LinearSeq[T], T](seq => imm.LinearSeq(seq: _*)) - implicit def vectorFormat[T: BigQueryJsonFormat] = viaSeq[Vector[T], T](seq => Vector(seq: _*)) + implicit def immIterableFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[imm.Iterable[T]] = + viaSeq[imm.Iterable[T], T](seq => imm.Iterable(seq: _*)) + implicit def immSeqFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[imm.Seq[T]] = + viaSeq[imm.Seq[T], T](seq => imm.Seq(seq: _*)) + implicit def immIndexedSeqFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[imm.IndexedSeq[T]] = + viaSeq[imm.IndexedSeq[T], T](seq => imm.IndexedSeq(seq: _*)) + implicit def immLinearSeqFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[imm.LinearSeq[T]] = + viaSeq[imm.LinearSeq[T], T](seq => imm.LinearSeq(seq: _*)) + implicit def vectorFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[Vector[T]] = + viaSeq[Vector[T], T](seq => Vector(seq: _*)) import collection._ - implicit def iterableFormat[T: BigQueryJsonFormat] = viaSeq[Iterable[T], T](seq => Iterable(seq: _*)) - implicit def seqFormat[T: BigQueryJsonFormat] = viaSeq[Seq[T], T](seq => Seq(seq: _*)) - implicit def indexedSeqFormat[T: BigQueryJsonFormat] = viaSeq[IndexedSeq[T], T](seq => IndexedSeq(seq: _*)) - implicit def linearSeqFormat[T: BigQueryJsonFormat] = viaSeq[LinearSeq[T], T](seq => LinearSeq(seq: _*)) + implicit def iterableFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[Iterable[T]] = + viaSeq[Iterable[T], T](seq => Iterable(seq: _*)) + implicit def seqFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[Seq[T]] = viaSeq[Seq[T], T](seq => Seq(seq: _*)) + implicit def indexedSeqFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[IndexedSeq[T]] = + viaSeq[IndexedSeq[T], T](seq => IndexedSeq(seq: _*)) + implicit def linearSeqFormat[T: BigQueryJsonFormat]: BigQueryJsonFormat[collection.LinearSeq[T]] = + viaSeq[collection.LinearSeq[T], T](seq => collection.LinearSeq(seq: _*)) /** * A BigQueryJsonFormat construction helper that creates a BigQueryJsonFormat for an Iterable type I from a builder function diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala index 3dcd414e0..8c84f2997 100644 --- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala +++ 
b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryRestBasicFormats.scala @@ -22,18 +22,18 @@ import scala.concurrent.duration.{ DurationLong, FiniteDuration } */ trait BigQueryRestBasicFormats { - implicit val IntJsonFormat = DefaultJsonProtocol.IntJsonFormat - implicit val FloatJsonFormat = DefaultJsonProtocol.FloatJsonFormat - implicit val DoubleJsonFormat = DefaultJsonProtocol.DoubleJsonFormat - implicit val ByteJsonFormat = DefaultJsonProtocol.ByteJsonFormat - implicit val ShortJsonFormat = DefaultJsonProtocol.ShortJsonFormat - implicit val BigDecimalJsonFormat = DefaultJsonProtocol.BigDecimalJsonFormat - implicit val BigIntJsonFormat = DefaultJsonProtocol.BigIntJsonFormat - implicit val UnitJsonFormat = DefaultJsonProtocol.UnitJsonFormat - implicit val BooleanJsonFormat = DefaultJsonProtocol.BooleanJsonFormat - implicit val CharJsonFormat = DefaultJsonProtocol.CharJsonFormat - implicit val StringJsonFormat = DefaultJsonProtocol.StringJsonFormat - implicit val SymbolJsonFormat = DefaultJsonProtocol.SymbolJsonFormat + implicit val IntJsonFormat: JsonFormat[Int] = DefaultJsonProtocol.IntJsonFormat + implicit val FloatJsonFormat: JsonFormat[Float] = DefaultJsonProtocol.FloatJsonFormat + implicit val DoubleJsonFormat: JsonFormat[Double] = DefaultJsonProtocol.DoubleJsonFormat + implicit val ByteJsonFormat: JsonFormat[Byte] = DefaultJsonProtocol.ByteJsonFormat + implicit val ShortJsonFormat: JsonFormat[Short] = DefaultJsonProtocol.ShortJsonFormat + implicit val BigDecimalJsonFormat: JsonFormat[BigDecimal] = DefaultJsonProtocol.BigDecimalJsonFormat + implicit val BigIntJsonFormat: JsonFormat[BigInt] = DefaultJsonProtocol.BigIntJsonFormat + implicit val UnitJsonFormat: JsonFormat[Unit] = DefaultJsonProtocol.UnitJsonFormat + implicit val BooleanJsonFormat: JsonFormat[Boolean] = DefaultJsonProtocol.BooleanJsonFormat + implicit val CharJsonFormat: JsonFormat[Char] = DefaultJsonProtocol.CharJsonFormat + implicit val StringJsonFormat: JsonFormat[String] = DefaultJsonProtocol.StringJsonFormat + implicit val SymbolJsonFormat: JsonFormat[Symbol] = DefaultJsonProtocol.SymbolJsonFormat implicit object BigQueryLongJsonFormat extends JsonFormat[Long] { def write(x: Long) = JsNumber(x) diff --git a/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala b/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala index 4875416f0..2c71e35e2 100644 --- a/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala +++ b/google-cloud-bigquery/src/test/scala/docs/scaladsl/BigQueryDoc.scala @@ -28,9 +28,11 @@ import pekko.stream.connectors.googlecloud.bigquery.model.{ TableDataListResponse, TableListResponse } -import pekko.stream.connectors.googlecloud.bigquery.scaladsl.BigQuery import pekko.stream.connectors.googlecloud.bigquery.scaladsl.schema.BigQuerySchemas._ +import pekko.stream.connectors.googlecloud.bigquery.scaladsl.schema.TableSchemaWriter +import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRootJsonFormat import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryJsonProtocol._ +import pekko.stream.connectors.googlecloud.bigquery.scaladsl.BigQuery import pekko.stream.scaladsl.{ Flow, Sink, Source } import pekko.{ Done, NotUsed } @@ -48,8 +50,8 @@ class BigQueryDoc { // #setup case class Person(name: String, age: Int, addresses: Seq[Address], isHakker: Boolean) case class Address(street: String, city: String, postalCode: Option[Int]) - implicit val addressFormat = 
bigQueryJsonFormat3(Address) - implicit val personFormat = bigQueryJsonFormat4(Person) + implicit val addressFormat: BigQueryRootJsonFormat[Address] = bigQueryJsonFormat3(Address.apply) + implicit val personFormat: BigQueryRootJsonFormat[Person] = bigQueryJsonFormat4(Person.apply) // #setup @nowarn("msg=dead code") @@ -113,8 +115,8 @@ class BigQueryDoc { // #table-methods // #create-table - implicit val addressSchema = bigQuerySchema3(Address) - implicit val personSchema = bigQuerySchema4(Person) + implicit val addressSchema: TableSchemaWriter[Address] = bigQuerySchema3(Address.apply) + implicit val personSchema: TableSchemaWriter[Person] = bigQuerySchema4(Person.apply) val newTable: Future[Table] = BigQuery.createTable[Person](datasetId, "newTableId") // #create-table diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala index 0009706bb..f79f96c26 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/A.scala @@ -39,7 +39,7 @@ case class A(integer: Int, long: Long, float: Float, double: Double, string: Str new B(f.get(6).get("v"))) def getInteger = integer - @JsonSerialize(using = classOf[ToStringSerializer]) + @JsonSerialize(`using` = classOf[ToStringSerializer]) def getLong = long def getFloat = float def getDouble = double @@ -74,7 +74,7 @@ case class C(numeric: BigDecimal, date: LocalDate, time: LocalTime, dateTime: Lo LocalDateTime.parse(node.get("f").get(3).get("v").textValue()), Instant.ofEpochMilli((BigDecimal(node.get("f").get(4).get("v").textValue()) * 1000).toLong)) - @JsonSerialize(using = classOf[ToStringSerializer]) + @JsonSerialize(`using` = classOf[ToStringSerializer]) def getNumeric = numeric def getDate = date def getTime = time diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala index 76e3b046e..a18ad78e5 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/e2e/scaladsl/BigQueryEndToEndSpec.scala @@ -14,12 +14,14 @@ package org.apache.pekko.stream.connectors.googlecloud.bigquery.e2e.scaladsl import org.apache.pekko -import pekko.actor.ActorSystem +import pekko.actor.{ ActorSystem, Scheduler } import pekko.{ pattern, Done } import pekko.stream.connectors.googlecloud.bigquery.HoverflySupport import pekko.stream.connectors.googlecloud.bigquery.e2e.{ A, B, C } import pekko.stream.connectors.googlecloud.bigquery.model.JobState import pekko.stream.connectors.googlecloud.bigquery.model.TableReference +import pekko.stream.connectors.googlecloud.bigquery.scaladsl.schema.TableSchemaWriter +import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryRootJsonFormat import pekko.testkit.TestKit import io.specto.hoverfly.junit.core.{ HoverflyMode, SimulationSource } import org.scalatest.BeforeAndAfterAll @@ -56,7 +58,7 @@ class BigQueryEndToEndSpec super.afterAll() } - implicit def scheduler = system.scheduler + implicit def 
scheduler: Scheduler = system.scheduler "BigQuery Scala DSL" should { @@ -66,12 +68,12 @@ class BigQueryEndToEndSpec import pekko.stream.connectors.googlecloud.bigquery.scaladsl.spray.BigQueryJsonProtocol._ import pekko.stream.scaladsl.{ Sink, Source } - implicit val cFormat = bigQueryJsonFormat5(C) - implicit val bFormat = bigQueryJsonFormat3(B) - implicit val aFormat = bigQueryJsonFormat7(A) - implicit val cSchema = bigQuerySchema5(C) - implicit val bSchema = bigQuerySchema3(B) - implicit val aSchema = bigQuerySchema7(A) + implicit val cFormat: BigQueryRootJsonFormat[C] = bigQueryJsonFormat5(C.apply) + implicit val bFormat: BigQueryRootJsonFormat[B] = bigQueryJsonFormat3(B.apply) + implicit val aFormat: BigQueryRootJsonFormat[A] = bigQueryJsonFormat7(A.apply) + implicit val cSchema: TableSchemaWriter[C] = bigQuerySchema5(C.apply) + implicit val bSchema: TableSchemaWriter[B] = bigQuerySchema3(B.apply) + implicit val aSchema: TableSchemaWriter[A] = bigQuerySchema7(A.apply) "create dataset" in { BigQuery.createDataset(datasetId).map { dataset => diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala index 7051651d9..a02043b99 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala @@ -94,7 +94,7 @@ class BigQueryQueriesSpec .post(BigQueryEndpoints.queries(settings.projectId).path.toString) .queryParam("prettyPrint", "false") .anyBody() - .willReturn(success(completeQuery.toJson.toString(), "application/json")))) + .willReturn(success(completeQuery.toJson.compactPrint, "application/json")))) query[JsValue]("SQL") .addAttributes(GoogleAttributes.settings(settings)) @@ -111,7 +111,7 @@ class BigQueryQueriesSpec .post(BigQueryEndpoints.queries(settings.projectId).path.toString) .queryParam("prettyPrint", "false") .anyBody() - .willReturn(success(completeQueryWith2ndPage.toJson.toString(), "application/json")) + .willReturn(success(completeQueryWith2ndPage.toJson.compactPrint, "application/json")) .get(BigQueryEndpoints.query(settings.projectId, jobId).path.toString) .queryParam("pageToken", pageToken) .queryParam("prettyPrint", "false") @@ -132,7 +132,7 @@ class BigQueryQueriesSpec .post(BigQueryEndpoints.queries(settings.projectId).path.toString) .queryParam("prettyPrint", "false") .anyBody() - .willReturn(success(incompleteQuery.toJson.toString(), "application/json")) + .willReturn(success(incompleteQuery.toJson.compactPrint, "application/json")) .get(BigQueryEndpoints.query(settings.projectId, jobId).path.toString) .queryParam("prettyPrint", "false") .willReturn(success(completeQuery.toJson.toString, "application/json")))) @@ -152,7 +152,7 @@ class BigQueryQueriesSpec .post(BigQueryEndpoints.queries(settings.projectId).path.toString) .queryParam("prettyPrint", "false") .anyBody() - .willReturn(success(incompleteQuery.toJson.toString(), "application/json")) + .willReturn(success(incompleteQuery.toJson.compactPrint, "application/json")) .get(BigQueryEndpoints.query(settings.projectId, jobId).path.toString) .queryParam("prettyPrint", "false") .willReturn(success(completeQueryWith2ndPage.toJson.toString, "application/json")) @@ -176,7 +176,7 @@ class 
BigQueryQueriesSpec .post(BigQueryEndpoints.queries(settings.projectId).path.toString) .queryParam("prettyPrint", "false") .anyBody() - .willReturn(success(completeQueryWithoutJobId.toJson.toString(), "application/json")))) + .willReturn(success(completeQueryWithoutJobId.toJson.compactPrint, "application/json")))) query[JsValue]("SQL") .addAttributes(GoogleAttributes.settings(settings)) @@ -204,7 +204,7 @@ class BigQueryQueriesSpec .post(BigQueryEndpoints.queries(settings.projectId).path.toString) .queryParam("prettyPrint", "false") .anyBody() - .willReturn(success(completeQuery.toJson.toString(), "application/json")))) + .willReturn(success(completeQuery.toJson.compactPrint, "application/json")))) recoverToSucceededIf[BrokenParserException] { query[JsValue]("SQL") diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/BigQuerySchemasSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/BigQuerySchemasSpec.scala index 0aef83003..a46711188 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/BigQuerySchemasSpec.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/BigQuerySchemasSpec.scala @@ -50,20 +50,20 @@ class BigQuerySchemasSpec extends AnyWordSpecLike with Matchers { "BigQuerySchemas" should { "correctly generate schema" in { - implicit val cSchemaWriter = bigQuerySchema1(C) - implicit val bSchemaWriter = bigQuerySchema2(B) - val generatedSchema = bigQuerySchema7(A).write + implicit val cSchemaWriter: TableSchemaWriter[C] = bigQuerySchema1(C.apply) + implicit val bSchemaWriter: TableSchemaWriter[B] = bigQuerySchema2(B.apply) + val generatedSchema = bigQuerySchema7(A.apply).write generatedSchema shouldEqual schema } "throw exception when nesting options" in { case class Invalid(invalid: Option[Option[String]]) - assertThrows[IllegalArgumentException](bigQuerySchema1(Invalid).write) + assertThrows[IllegalArgumentException](bigQuerySchema1(Invalid.apply).write) } "throw exception when nesting options inside seqs" in { case class Invalid(invalid: Seq[Option[String]]) - assertThrows[IllegalArgumentException](bigQuerySchema1(Invalid).write) + assertThrows[IllegalArgumentException](bigQuerySchema1(Invalid.apply).write) } } } diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryJsonProtocolSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryJsonProtocolSpec.scala index 36dd1d822..1e7a48aff 100644 --- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryJsonProtocolSpec.scala +++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryJsonProtocolSpec.scala @@ -59,8 +59,8 @@ class BigQueryJsonProtocolSpec extends BigQueryJsonProtocol with AnyWordSpecLike case class Record(name: Option[String], addresses: Seq[Address]) case class Address(street: Option[String], city: Option[String]) - implicit val addressFormat = bigQueryJsonFormat2(Address) - implicit val recordFormat = bigQueryJsonFormat2(Record) + implicit val addressFormat: BigQueryRootJsonFormat[Address] = bigQueryJsonFormat2(Address.apply) + implicit val recordFormat: 
BigQueryRootJsonFormat[Record] = bigQueryJsonFormat2(Record.apply) "BigQueryJsonProtocol" should { diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala index 66b1e27a7..90dd33581 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala @@ -69,7 +69,7 @@ private[pubsub] trait PubSubApi { def PubSubGoogleApisPort: Int def isEmulated: Boolean - private implicit val instantFormat = new RootJsonFormat[Instant] { + private implicit val instantFormat: RootJsonFormat[Instant] = new RootJsonFormat[Instant] { override def read(jsValue: JsValue): Instant = jsValue match { case JsString(time) => Instant.parse(time) case _ => deserializationError("Instant required as a string of RFC3339 UTC Zulu format.") @@ -77,7 +77,7 @@ private[pubsub] trait PubSubApi { override def write(instant: Instant): JsValue = JsString(instant.toString) } - private implicit val pubSubMessageFormat = + private implicit val pubSubMessageFormat: RootJsonFormat[PubSubMessage] = new RootJsonFormat[PubSubMessage] { override def read(json: JsValue): PubSubMessage = { val fields = json.asJsObject.fields @@ -98,7 +98,7 @@ private[pubsub] trait PubSubApi { ++ m.attributes.map(attributes => "attributes" -> attributes.toJson): _*) } - private implicit val publishMessageFormat = new RootJsonFormat[PublishMessage] { + private implicit val publishMessageFormat: RootJsonFormat[PublishMessage] = new RootJsonFormat[PublishMessage] { def read(json: JsValue): PublishMessage = { val data = json.asJsObject.fields("data").convertTo[String] val attributes = json.asJsObject.fields("attributes").convertTo[immutable.Map[String, String]] @@ -112,37 +112,39 @@ private[pubsub] trait PubSubApi { m.attributes.map(a => "attributes" -> a.toJson): _*) } - private implicit val pubSubRequestFormat = new RootJsonFormat[PublishRequest] { + private implicit val pubSubRequestFormat: RootJsonFormat[PublishRequest] = new RootJsonFormat[PublishRequest] { def read(json: JsValue): PublishRequest = PublishRequest(json.asJsObject.fields("messages").convertTo[immutable.Seq[PublishMessage]]) def write(pr: PublishRequest): JsValue = JsObject("messages" -> pr.messages.toJson) } - private implicit val gcePubSubResponseFormat = new RootJsonFormat[PublishResponse] { + private implicit val gcePubSubResponseFormat: RootJsonFormat[PublishResponse] = new RootJsonFormat[PublishResponse] { def read(json: JsValue): PublishResponse = PublishResponse(json.asJsObject.fields("messageIds").convertTo[immutable.Seq[String]]) def write(pr: PublishResponse): JsValue = JsObject("messageIds" -> pr.messageIds.toJson) } - private implicit val receivedMessageFormat = new RootJsonFormat[ReceivedMessage] { + private implicit val receivedMessageFormat: RootJsonFormat[ReceivedMessage] = new RootJsonFormat[ReceivedMessage] { def read(json: JsValue): ReceivedMessage = ReceivedMessage(json.asJsObject.fields("ackId").convertTo[String], json.asJsObject.fields("message").convertTo[PubSubMessage]) def write(rm: ReceivedMessage): JsValue = JsObject("ackId" -> rm.ackId.toJson, "message" -> rm.message.toJson) } - private implicit val pubSubPullResponseFormat = new RootJsonFormat[PullResponse] { + private implicit val pubSubPullResponseFormat: 
RootJsonFormat[PullResponse] = new RootJsonFormat[PullResponse] { def read(json: JsValue): PullResponse = PullResponse(json.asJsObject.fields.get("receivedMessages").map(_.convertTo[immutable.Seq[ReceivedMessage]])) def write(pr: PullResponse): JsValue = pr.receivedMessages.map(rm => JsObject("receivedMessages" -> rm.toJson)).getOrElse(JsObject.empty) } - private implicit val acknowledgeRequestFormat = new RootJsonFormat[AcknowledgeRequest] { - def read(json: JsValue): AcknowledgeRequest = - AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) - def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) - } - private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest.apply) + private implicit val acknowledgeRequestFormat: RootJsonFormat[AcknowledgeRequest] = + new RootJsonFormat[AcknowledgeRequest] { + def read(json: JsValue): AcknowledgeRequest = + AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) + def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) + } + private implicit val pullRequestFormat: RootJsonFormat[PullRequest] = + DefaultJsonProtocol.jsonFormat2(PullRequest.apply) private def scheme: String = if (isEmulated) "http" else "https" diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala index d5bbb9e7e..6228752a1 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala @@ -60,7 +60,7 @@ class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures s"pekko.connectors.google.credentials.none.project-id = ${TestCredentials.projectId}") .withFallback(ConfigFactory.load())) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 5.seconds, interval = 100.millis) def createInsecureSslEngine(host: String, port: Int): SSLEngine = { diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala index 6ca40022d..ae402473f 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/GoogleSettings.scala @@ -90,7 +90,8 @@ object GoogleSettings { } -final case class GoogleSettings @InternalApi private[connectors] (projectId: String, +@InternalApi +final case class GoogleSettings(projectId: String, credentials: Credentials, requestSettings: RequestSettings) { def getProjectId = projectId @@ -134,7 +135,8 @@ object RequestSettings { apply(userIp.toScala, quotaUser.toScala, prettyPrint, chunkSize, retrySettings, forwardProxy.toScala) } -final case class RequestSettings @InternalApi private[connectors] ( +@InternalApi +final case class RequestSettings( userIp: Option[String], quotaUser: Option[String], prettyPrint: Boolean, diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index 
8dbc91cc2..969cef4c1 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -14,7 +14,6 @@ package org.apache.pekko.stream.connectors.google import org.apache.pekko -import pekko.actor.ActorSystem import pekko.NotUsed import pekko.annotation.InternalApi import pekko.http.scaladsl.model.HttpMethods.{ POST, PUT } diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/NoCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/NoCredentials.scala index f65b2a534..6bff78f49 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/NoCredentials.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/NoCredentials.scala @@ -32,7 +32,7 @@ private[connectors] object NoCredentials { } @InternalApi -private[auth] final case class NoCredentials private (projectId: String, token: String) extends Credentials { +private[connectors] final case class NoCredentials(projectId: String, token: String) extends Credentials { private val futureToken = Future.successful(OAuth2BearerToken(token)) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 8006d1e7d..6158ce97b 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -195,7 +195,6 @@ object Dependencies { ) ++ Mockito) val GoogleBigQuery = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-jackson" % PekkoHttpVersion % Provided, @@ -221,7 +220,6 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion) ++ Mockito) val GooglePubSub = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, From 4c84bcac26d76c205e872551f26c2aa4051143a7 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Fri, 9 Jun 2023 12:48:11 +0100 Subject: [PATCH 19/90] scala 3 support for json streaming (#146) * scala 3 support for json streaming Update JsonStreamReader.scala make buffer a val Update JsonStreamReader.scala Update JsonStreamReader.scala use Iterable.single Delete MyByteString.scala compile issue in scala 2.12 Update JsonStreamReader.scala * Update Dependencies.scala * use separate scala2/3 code * Update QueueHelper.scala --- build.sbt | 11 +++++++++- .../connectors/json/impl/QueueHelper.scala | 19 +++++++++++++++++ .../connectors/json/impl/QueueHelper.scala | 21 +++++++++++++++++++ .../json/impl/JsonStreamReader.scala | 6 ++++-- project/Dependencies.scala | 1 - 5 files changed, 54 insertions(+), 4 deletions(-) create mode 100644 json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala create mode 100644 json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala diff --git a/build.sbt b/build.sbt index a6b815c93..deeaf3022 100644 --- a/build.sbt +++ b/build.sbt @@ -277,7 +277,16 @@ lazy val ironmq = pekkoConnectorProject( lazy val jms = pekkoConnectorProject("jms", "jms", Dependencies.Jms) -lazy val jsonStreaming = pekkoConnectorProject("json-streaming", "json.streaming", Dependencies.JsonStreaming) +val scalaReleaseSeparateSource: Def.SettingsDefinition = Compile / unmanagedSourceDirectories ++= { + if 
(scalaVersion.value.startsWith("2")) { + Seq((LocalRootProject / baseDirectory).value / "src" / "main" / "scala-2") + } else { + Seq((LocalRootProject / baseDirectory).value / "src" / "main" / "scala-3") + } +} + +lazy val jsonStreaming = pekkoConnectorProject("json-streaming", "json.streaming", + Dependencies.JsonStreaming ++ scalaReleaseSeparateSource) lazy val kinesis = pekkoConnectorProject("kinesis", "aws.kinesis", Dependencies.Kinesis) diff --git a/json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala b/json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala new file mode 100644 index 000000000..5972a1eca --- /dev/null +++ b/json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +package org.apache.pekko.stream.connectors.json.impl + +import org.apache.pekko.util.ByteString + +import scala.collection.immutable.Queue + +private[impl] object QueueHelper { + @inline final def enqueue(queue: Queue[ByteString], byteString: ByteString): Queue[ByteString] = + queue.enqueue(byteString) +} diff --git a/json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala b/json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala new file mode 100644 index 000000000..27b1f7f5e --- /dev/null +++ b/json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. 
+ */ + +package org.apache.pekko.stream.connectors.json.impl + +import org.apache.pekko.util.ByteString + +import scala.collection.immutable.Queue + +private[impl] object QueueHelper { + inline final def enqueue(queue: Queue[ByteString], byteString: ByteString): Queue[ByteString] = { + // see https://github.com/lampepfl/dotty/issues/17946 + queue.enqueueAll(Iterable.single(byteString)) + } +} diff --git a/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala b/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala index d93310d80..990b68729 100644 --- a/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala +++ b/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala @@ -48,8 +48,10 @@ private[pekko] final class JsonStreamReader(path: JsonPath) extends GraphStage[F private val config = surfer.configBuilder .bind(path, new JsonPathListener { - override def onValue(value: Any, context: ParsingContext): Unit = - buffer = buffer.enqueue(ByteString(value.toString)) + override def onValue(value: Any, context: ParsingContext): Unit = { + // see https://github.com/lampepfl/dotty/issues/17946 + buffer = QueueHelper.enqueue(buffer, ByteString(value.toString)) + } }) .build private val parser = surfer.createNonBlockingParser(config) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 6158ce97b..1a1f851c9 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -310,7 +310,6 @@ object Dependencies { "https://repository.jboss.org/nexus/content/groups/public")) +: externalResolvers.value) val JsonStreaming = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.github.jsurfer" % "jsurfer-jackson" % "1.6.0") ++ JacksonDatabindDependencies) From a5796711f4b19f358f976b189581b980da34b2b0 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 10 Jun 2023 11:05:22 +0100 Subject: [PATCH 20/90] Scala3 support for jms connector (#145) * try scala 3 (#123) * enable scala3 support for jms connector * refactor * scala 2.12 compile issue * enable elasticsearch Update RestBulkApi.scala sort imports revert merge issue Update CouchbaseFlow.scala --- .../elasticsearch/impl/RestBulkApi.scala | 6 +- .../ElasticsearchConnectorBehaviour.scala | 4 +- .../OpensearchConnectorBehaviour.scala | 4 +- .../googlecloud/pubsub/impl/PubSubApi.scala | 34 +++++------ .../googlecloud/storage/impl/Formats.scala | 12 ++-- .../storage/WithMaterializerGlobal.scala | 2 +- .../connectors/google/ResumableUpload.scala | 59 ++++++++++--------- .../google/auth/GoogleOAuth2Spec.scala | 2 +- .../google/auth/OAuth2CredentialsSpec.scala | 2 +- .../firebase/fcm/v1/impl/FcmSenderSpec.scala | 2 +- .../jms/impl/GraphStageCompanion.scala | 34 +++++++++++ .../jms/impl/JmsAckSourceStage.scala | 2 +- .../connectors/jms/impl/JmsConnector.scala | 20 +++---- .../jms/impl/JmsConsumerStage.scala | 2 +- .../jms/impl/JmsProducerStage.scala | 18 +++++- .../jms/impl/JmsTxSourceStage.scala | 2 +- .../jms/impl/SourceStageLogic.scala | 23 ++++++-- project/Dependencies.scala | 10 +++- 18 files changed, 148 insertions(+), 90 deletions(-) create mode 100644 jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala 
b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala index 70e016de6..e9db5c111 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/impl/RestBulkApi.scala @@ -46,8 +46,8 @@ private[impl] abstract class RestBulkApi[T, C] { def messageToJson(message: WriteMessage[T, C], messageSource: String): String = message.operation match { case Index | Create => "\n" + messageSource - case Upsert => "\n" + JsObject("doc" -> messageSource.parseJson, "doc_as_upsert" -> JsTrue).toString - case Update => "\n" + JsObject("doc" -> messageSource.parseJson).toString + case Upsert => "\n" + JsObject("doc" -> messageSource.parseJson, "doc_as_upsert" -> JsTrue).compactPrint + case Update => "\n" + JsObject("doc" -> messageSource.parseJson).compactPrint case Delete => "" case Nop => "" } @@ -69,7 +69,7 @@ private[impl] abstract class RestBulkApi[T, C] { // good message val command = message.operation.command val res = itemsIter.next().asJsObject.fields(command).asJsObject - val error: Option[String] = res.fields.get("error").map(_.toString()) + val error: Option[String] = res.fields.get("error").map(_.compactPrint) ret += new WriteResult(message, error) } else { // error? diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala index 33b8c8032..9f360a90d 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala @@ -200,7 +200,7 @@ trait ElasticsearchConnectorBehaviour { .via( ElasticsearchFlow.create( constructElasticsearchParams(indexName, "_doc", apiVersion), - baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))) + baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))(RootJsObjectFormat)) .runWith(Sink.seq) val start = System.currentTimeMillis() @@ -286,7 +286,7 @@ trait ElasticsearchConnectorBehaviour { .via( ElasticsearchFlow.createWithContext( constructElasticsearchParams(indexName, "_doc", apiVersion), - baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))) + baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))(RootJsObjectFormat)) .runWith(Sink.seq) val start = System.currentTimeMillis() diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala index 77c513ee1..1c235685a 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala @@ -200,7 +200,7 @@ trait OpensearchConnectorBehaviour { .via( ElasticsearchFlow.create( constructElasticsearchParams(indexName, "_doc", apiVersion), - baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))) + baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))(RootJsObjectFormat)) .runWith(Sink.seq) val start = System.currentTimeMillis() @@ -286,7 +286,7 @@ trait OpensearchConnectorBehaviour { .via( ElasticsearchFlow.createWithContext( constructElasticsearchParams(indexName, "_doc", apiVersion), - baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 100.millis)))) + baseWriteSettings.withRetryLogic(RetryAtFixedRate(5, 
100.millis)))(RootJsObjectFormat)) .runWith(Sink.seq) val start = System.currentTimeMillis() diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala index 90dd33581..696406f89 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala @@ -69,7 +69,7 @@ private[pubsub] trait PubSubApi { def PubSubGoogleApisPort: Int def isEmulated: Boolean - private implicit val instantFormat: RootJsonFormat[Instant] = new RootJsonFormat[Instant] { + private implicit val instantFormat = new RootJsonFormat[Instant] { override def read(jsValue: JsValue): Instant = jsValue match { case JsString(time) => Instant.parse(time) case _ => deserializationError("Instant required as a string of RFC3339 UTC Zulu format.") @@ -77,7 +77,7 @@ private[pubsub] trait PubSubApi { override def write(instant: Instant): JsValue = JsString(instant.toString) } - private implicit val pubSubMessageFormat: RootJsonFormat[PubSubMessage] = + private implicit val pubSubMessageFormat = new RootJsonFormat[PubSubMessage] { override def read(json: JsValue): PubSubMessage = { val fields = json.asJsObject.fields @@ -98,7 +98,7 @@ private[pubsub] trait PubSubApi { ++ m.attributes.map(attributes => "attributes" -> attributes.toJson): _*) } - private implicit val publishMessageFormat: RootJsonFormat[PublishMessage] = new RootJsonFormat[PublishMessage] { + private implicit val publishMessageFormat = new RootJsonFormat[PublishMessage] { def read(json: JsValue): PublishMessage = { val data = json.asJsObject.fields("data").convertTo[String] val attributes = json.asJsObject.fields("attributes").convertTo[immutable.Map[String, String]] @@ -112,39 +112,37 @@ private[pubsub] trait PubSubApi { m.attributes.map(a => "attributes" -> a.toJson): _*) } - private implicit val pubSubRequestFormat: RootJsonFormat[PublishRequest] = new RootJsonFormat[PublishRequest] { + private implicit val pubSubRequestFormat = new RootJsonFormat[PublishRequest] { def read(json: JsValue): PublishRequest = PublishRequest(json.asJsObject.fields("messages").convertTo[immutable.Seq[PublishMessage]]) def write(pr: PublishRequest): JsValue = JsObject("messages" -> pr.messages.toJson) } - private implicit val gcePubSubResponseFormat: RootJsonFormat[PublishResponse] = new RootJsonFormat[PublishResponse] { + private implicit val gcePubSubResponseFormat = new RootJsonFormat[PublishResponse] { def read(json: JsValue): PublishResponse = PublishResponse(json.asJsObject.fields("messageIds").convertTo[immutable.Seq[String]]) def write(pr: PublishResponse): JsValue = JsObject("messageIds" -> pr.messageIds.toJson) } - private implicit val receivedMessageFormat: RootJsonFormat[ReceivedMessage] = new RootJsonFormat[ReceivedMessage] { + private implicit val receivedMessageFormat = new RootJsonFormat[ReceivedMessage] { def read(json: JsValue): ReceivedMessage = ReceivedMessage(json.asJsObject.fields("ackId").convertTo[String], json.asJsObject.fields("message").convertTo[PubSubMessage]) def write(rm: ReceivedMessage): JsValue = JsObject("ackId" -> rm.ackId.toJson, "message" -> rm.message.toJson) } - private implicit val pubSubPullResponseFormat: RootJsonFormat[PullResponse] = new RootJsonFormat[PullResponse] { + private implicit val 
pubSubPullResponseFormat = new RootJsonFormat[PullResponse] { def read(json: JsValue): PullResponse = PullResponse(json.asJsObject.fields.get("receivedMessages").map(_.convertTo[immutable.Seq[ReceivedMessage]])) def write(pr: PullResponse): JsValue = pr.receivedMessages.map(rm => JsObject("receivedMessages" -> rm.toJson)).getOrElse(JsObject.empty) } - private implicit val acknowledgeRequestFormat: RootJsonFormat[AcknowledgeRequest] = - new RootJsonFormat[AcknowledgeRequest] { - def read(json: JsValue): AcknowledgeRequest = - AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) - def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) - } - private implicit val pullRequestFormat: RootJsonFormat[PullRequest] = - DefaultJsonProtocol.jsonFormat2(PullRequest.apply) + private implicit val acknowledgeRequestFormat = new RootJsonFormat[AcknowledgeRequest] { + def read(json: JsValue): AcknowledgeRequest = + AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) + def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) + } + private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest.apply) private def scheme: String = if (isEmulated) "http" else "https" @@ -175,7 +173,7 @@ private[pubsub] trait PubSubApi { .mapMaterializedValue(_ => NotUsed) private implicit val pullResponseUnmarshaller: FromResponseUnmarshaller[PullResponse] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => response.status match { case StatusCodes.Success(_) if response.entity.contentType == ContentTypes.`application/json` => Unmarshal(response.entity).to[PullResponse] @@ -213,7 +211,7 @@ private[pubsub] trait PubSubApi { .mapMaterializedValue(_ => NotUsed) private implicit val acknowledgeResponseUnmarshaller: FromResponseUnmarshaller[Done] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => response.status match { case StatusCodes.Success(_) => response.discardEntityBytes().future @@ -263,7 +261,7 @@ private[pubsub] trait PubSubApi { publish(topic, parallelism, None) private implicit val publishResponseUnmarshaller: FromResponseUnmarshaller[PublishResponse] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => response.status match { case StatusCodes.Success(_) if response.entity.contentType == ContentTypes.`application/json` => Unmarshal(response.entity).to[PublishResponse] diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala index ee2da5dcc..5c20c3931 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala @@ -49,8 +49,7 @@ object Formats extends DefaultJsonProtocol { domain: String, projectTeam: ProjectTeam, etag: String) - private implicit val ObjectAccessControlsJsonFormat: RootJsonFormat[ObjectAccessControls] = - 
jsonFormat13(ObjectAccessControls.apply) + private implicit val ObjectAccessControlsJsonFormat = jsonFormat13(ObjectAccessControls) /** * Google API storage response object @@ -80,8 +79,7 @@ object Formats extends DefaultJsonProtocol { timeStorageClassUpdated: String, updated: String) - private implicit val storageObjectReadOnlyJson: RootJsonFormat[StorageObjectReadOnlyJson] = - jsonFormat18(StorageObjectReadOnlyJson.apply) + private implicit val storageObjectReadOnlyJson = jsonFormat18(StorageObjectReadOnlyJson) // private sub class of StorageObjectJson used to workaround 22 field jsonFormat issue private final case class StorageObjectWriteableJson( @@ -100,8 +98,7 @@ object Formats extends DefaultJsonProtocol { temporaryHold: Option[Boolean], acl: Option[List[ObjectAccessControls]]) - private implicit val storageObjectWritableJson: RootJsonFormat[StorageObjectWriteableJson] = - jsonFormat14(StorageObjectWriteableJson.apply) + private implicit val storageObjectWritableJson = jsonFormat14(StorageObjectWriteableJson) private implicit object StorageObjectJsonFormat extends RootJsonFormat[StorageObjectJson] { override def read(value: JsValue): StorageObjectJson = { @@ -178,8 +175,7 @@ object Formats extends DefaultJsonProtocol { } } - private implicit val bucketListResultJsonReads: RootJsonFormat[BucketListResultJson] = - jsonFormat4(BucketListResultJson.apply) + private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson.apply) implicit object RewriteResponseReads extends RootJsonReader[RewriteResponse] { override def read(json: JsValue): RewriteResponse = { diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala index 787885b91..eb046720a 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala @@ -29,7 +29,7 @@ trait WithMaterializerGlobal with ScalaFutures with IntegrationPatience with Matchers { - implicit val actorSystem: ActorSystem = ActorSystem("test") + implicit val actorSystem = ActorSystem("test") implicit val ec: ExecutionContext = actorSystem.dispatcher override protected def afterAll(): Unit = { diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index 969cef4c1..f50c921b9 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google import org.apache.pekko +import pekko.actor.ActorSystem import pekko.NotUsed import pekko.annotation.InternalApi import pekko.http.scaladsl.model.HttpMethods.{ POST, PUT } @@ -55,7 +56,7 @@ private[connectors] object ResumableUpload { Sink .fromMaterializer { (mat, attr) => import mat.executionContext - implicit val materializer: Materializer = mat + implicit val materializer = mat implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val uploadChunkSize = settings.requestSettings.uploadChunkSize @@ -95,24 +96,25 @@ private[connectors] object 
ResumableUpload { private def initiateSession(request: HttpRequest)(implicit mat: Materializer, settings: GoogleSettings): Future[Uri] = { + implicit val system: ActorSystem = mat.system import implicits._ - implicit val um: FromResponseUnmarshaller[Uri] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => - response.discardEntityBytes().future.map { _ => - response.header[Location].fold(throw InvalidResponseException(ErrorInfo("No Location header")))(_.uri) - } - }.withDefaultRetry + implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + response.discardEntityBytes().future.map { _ => + response.header[Location].fold(throw InvalidResponseException(ErrorInfo("No Location header")))(_.uri) + } + }.withDefaultRetry - GoogleHttp(mat.system).singleAuthenticatedRequest[Uri](request) + GoogleHttp().singleAuthenticatedRequest[Uri](request) } private final case class DoNotRetry(ex: Throwable) extends Throwable(ex) with NoStackTrace private def uploadChunk[T: FromResponseUnmarshaller]( request: HttpRequest)(implicit mat: Materializer): Flow[Either[T, MaybeLast[Chunk]], Try[Option[T]], NotUsed] = { + implicit val system: ActorSystem = mat.system - val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => response.status match { case PermanentRedirect => response.discardEntityBytes().future.map(_ => None) @@ -125,8 +127,7 @@ private[connectors] object ResumableUpload { val uri = request.uri Flow[HttpRequest] .map((_, ())) - .via(GoogleHttp(mat.system).cachedHostConnectionPoolWithContext(uri.authority.host.address, uri.effectivePort)( - um)) + .via(GoogleHttp().cachedHostConnectionPoolWithContext(uri.authority.host.address, uri.effectivePort)(um)) .map(_._1.recoverWith { case DoNotRetry(ex) => Failure(ex) }) } @@ -146,30 +147,30 @@ private[connectors] object ResumableUpload { request: HttpRequest, chunk: Future[MaybeLast[Chunk]])( implicit mat: Materializer, settings: GoogleSettings): Future[Either[T, MaybeLast[Chunk]]] = { + implicit val system: ActorSystem = mat.system import implicits._ - implicit val um: FromResponseUnmarshaller[Either[T, Long]] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => - response.status match { - case OK | Created => Unmarshal(response).to[T].map(Left(_)) - case PermanentRedirect => - response.discardEntityBytes().future.map { _ => - Right( - response - .header[Range] - .flatMap(_.ranges.headOption) - .collect { - case Slice(_, last) => last + 1 - }.getOrElse(0L)) - } - case _ => throw InvalidResponseException(ErrorInfo(response.status.value, response.status.defaultMessage)) - } - }.withDefaultRetry + implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + response.status match { + case OK | Created => Unmarshal(response).to[T].map(Left(_)) + case PermanentRedirect => + response.discardEntityBytes().future.map { _ => + Right( + response + .header[Range] + .flatMap(_.ranges.headOption) + .collect { + case Slice(_, last) => last + 1 + }.getOrElse(0L)) + } + case _ => throw InvalidResponseException(ErrorInfo(response.status.value, response.status.defaultMessage)) + } + }.withDefaultRetry import mat.executionContext chunk.flatMap { case maybeLast @ MaybeLast(Chunk(bytes, position)) => - GoogleHttp(mat.system) + GoogleHttp() 
.singleAuthenticatedRequest[Either[T, Long]](request.addHeader(statusRequestHeader)) .map { case Left(result) if maybeLast.isLast => Left(result) diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala index 753e3004a..1bd56b790 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala @@ -46,7 +46,7 @@ class GoogleOAuth2Spec implicit val executionContext: ExecutionContext = system.dispatcher implicit val settings: GoogleSettings = GoogleSettings(system) - implicit val clock: Clock = Clock.systemUTC() + implicit val clock = Clock.systemUTC() lazy val privateKey = { val inputStream = getClass.getClassLoader.getResourceAsStream("private_pcks8.pem") diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala index 446336dd9..7d7e2342a 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala @@ -45,7 +45,7 @@ class OAuth2CredentialsSpec import system.dispatcher implicit val settings: RequestSettings = GoogleSettings().requestSettings - implicit val clock: Clock = Clock.systemUTC() + implicit val clock = Clock.systemUTC() final object AccessTokenProvider { @volatile var accessTokenPromise: Promise[AccessToken] = Promise.failed(new RuntimeException) diff --git a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala index ca6c18820..a2d4d4922 100644 --- a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala +++ b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala @@ -57,7 +57,7 @@ class FcmSenderSpec implicit val executionContext: ExecutionContext = system.dispatcher - implicit val conf: FcmSettings = FcmSettings() + implicit val conf = FcmSettings() implicit val settings: GoogleSettings = GoogleSettings().copy(projectId = "projectId") "FcmSender" should { diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala new file mode 100644 index 000000000..e43a4d242 --- /dev/null +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. 
+ */ + +package org.apache.pekko.stream.connectors.jms.impl + +import org.apache.pekko +import pekko.annotation.InternalApi +import pekko.stream.Materializer +import pekko.stream.connectors.jms.Destination + +import scala.concurrent.duration.FiniteDuration + +/** + * Exposes some protected methods from [[pekko.stream.stage.GraphStage]] + * that are not accessible when using Scala3 compiler. + */ +@InternalApi +private[impl] trait GraphStageCompanion { + def graphStageMaterializer: Materializer + + def graphStageDestination: Destination + + def scheduleOnceOnGraphStage(timerKey: Any, delay: FiniteDuration): Unit + + def isTimerActiveOnGraphStage(timerKey: Any): Boolean + + def cancelTimerOnGraphStage(timerKey: Any): Unit +} diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala index 4665463dc..16c2a0da0 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsAckSourceStage.scala @@ -49,7 +49,7 @@ private[jms] final class JmsAckSourceStage(settings: JmsConsumerSettings, destin createDestination: jms.Session => javax.jms.Destination): JmsAckSession = { val session = connection.createSession(false, settings.acknowledgeMode.getOrElse(AcknowledgeMode.ClientAcknowledge).mode) - new JmsAckSession(connection, session, createDestination(session), destination, maxPendingAcks) + new JmsAckSession(connection, session, createDestination(session), graphStageDestination, maxPendingAcks) } protected def pushMessage(msg: AckEnvelope): Unit = push(out, msg) diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala index 414462769..5fa898724 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConnector.scala @@ -37,7 +37,7 @@ import scala.util.{ Failure, Success, Try } */ @InternalApi private[jms] trait JmsConnector[S <: JmsSession] { - this: TimerGraphStageLogic with StageLogging => + this: TimerGraphStageLogic with GraphStageCompanion with StageLogging => import JmsConnector._ @@ -71,12 +71,12 @@ private[jms] trait JmsConnector[S <: JmsSession] { Source .queue[InternalConnectionState](2, OverflowStrategy.dropHead) .toMat(BroadcastHub.sink(1))(Keep.both) - .run()(this.materializer) + .run()(graphStageMaterializer) connectionStateQueue = queue connectionStateSourcePromise.complete(Success(source)) // add subscription to purge queued connection status events after the configured timeout. 
- scheduleOnce(ConnectionStatusTimeout, jmsSettings.connectionStatusSubscriptionTimeout) + scheduleOnceOnGraphStage(ConnectionStatusTimeout, jmsSettings.connectionStatusSubscriptionTimeout) } protected def finishStop(): Unit = { @@ -91,7 +91,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { closeSessions() val previous = updateStateWith(update) closeConnectionAsync(connection(previous)) - if (isTimerActive("connection-status-timeout")) drainConnectionState() + if (isTimerActiveOnGraphStage("connection-status-timeout")) drainConnectionState() connectionStateQueue.complete() } @@ -219,7 +219,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { closeConnectionAsync(connection(status)) val delay = if (backoffMaxed) maxBackoff else waitTime(nextAttempt) val backoffNowMaxed = backoffMaxed || delay == maxBackoff - scheduleOnce(AttemptConnect(nextAttempt, backoffNowMaxed), delay) + scheduleOnceOnGraphStage(AttemptConnect(nextAttempt, backoffNowMaxed), delay) } } @@ -234,7 +234,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { } private def drainConnectionState(): Unit = - Source.future(connectionStateSource).flatMapConcat(identity).runWith(Sink.ignore)(this.materializer) + Source.future(connectionStateSource).flatMapConcat(identity).runWith(Sink.ignore)(graphStageMaterializer) protected def executionContext(attributes: Attributes): ExecutionContext = { val dispatcherId = (attributes.get[ActorAttributes.Dispatcher](ActorAttributes.IODispatcher) match { @@ -244,11 +244,11 @@ private[jms] trait JmsConnector[S <: JmsSession] { }) match { case d @ ActorAttributes.IODispatcher => // this one is not a dispatcher id, but is a config path pointing to the dispatcher id - materializer.system.settings.config.getString(d.dispatcher) + graphStageMaterializer.system.settings.config.getString(d.dispatcher) case d => d.dispatcher } - materializer.system.dispatchers.lookup(dispatcherId) + graphStageMaterializer.system.dispatchers.lookup(dispatcherId) } protected def createSession(connection: jms.Connection, createDestination: jms.Session => jms.Destination): S @@ -324,7 +324,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { private def cancelAckTimers(s: JmsSession): Unit = s match { case session: JmsAckSession => - cancelTimer(FlushAcknowledgementsTimerKey(session)) + cancelTimerOnGraphStage(FlushAcknowledgementsTimerKey(session)) case _ => () } @@ -341,7 +341,7 @@ private[jms] trait JmsConnector[S <: JmsSession] { } private def openConnection(attempt: Int, backoffMaxed: Boolean): Future[jms.Connection] = { - implicit val system: ActorSystem = materializer.system + implicit val system: ActorSystem = graphStageMaterializer.system val jmsConnection = openConnectionAttempt(startConnection) updateState(JmsConnectorInitializing(jmsConnection, attempt, backoffMaxed, 0)) jmsConnection.map { connection => diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala index 929296655..211995b7c 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsConsumerStage.scala @@ -52,7 +52,7 @@ private[jms] final class JmsConsumerStage(settings: JmsConsumerSettings, destina createDestination: jms.Session => javax.jms.Destination): JmsConsumerSession = { val session = connection.createSession(false, settings.acknowledgeMode.getOrElse(AcknowledgeMode.AutoAcknowledge).mode) - 
new JmsConsumerSession(connection, session, createDestination(session), destination) + new JmsConsumerSession(connection, session, createDestination(session), graphStageDestination) } protected def pushMessage(msg: jms.Message): Unit = { diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala index 840f64c3a..b0bcb5daa 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsProducerStage.scala @@ -23,9 +23,10 @@ import pekko.stream.impl.Buffer import pekko.stream.scaladsl.Source import pekko.stream.stage._ import pekko.util.OptionVal -import javax.jms +import javax.jms import scala.concurrent.Future +import scala.concurrent.duration.FiniteDuration import scala.util.control.NoStackTrace import scala.util.{ Failure, Success, Try } @@ -34,7 +35,7 @@ import scala.util.{ Failure, Success, Try } */ @InternalApi private trait JmsProducerConnector extends JmsConnector[JmsProducerSession] { - this: TimerGraphStageLogic with StageLogging => + this: TimerGraphStageLogic with GraphStageCompanion with StageLogging => protected final def createSession(connection: jms.Connection, createDestination: jms.Session => jms.Destination): JmsProducerSession = { @@ -74,7 +75,18 @@ private[jms] final class JmsProducerStage[E <: JmsEnvelope[PassThrough], PassThr } private def producerLogic(inheritedAttributes: Attributes) = - new TimerGraphStageLogic(shape) with JmsProducerConnector with StageLogging { + new TimerGraphStageLogic(shape) with JmsProducerConnector with GraphStageCompanion with StageLogging { + + final override def graphStageMaterializer: Materializer = materializer + + final override def graphStageDestination: Destination = destination + + final override def scheduleOnceOnGraphStage(timerKey: Any, delay: FiniteDuration): Unit = + scheduleOnce(timerKey, delay) + + final override def isTimerActiveOnGraphStage(timerKey: Any): Boolean = isTimerActive(timerKey) + + final override def cancelTimerOnGraphStage(timerKey: Any): Unit = cancelTimer(timerKey) /* * NOTE: the following code is heavily inspired by org.apache.pekko.stream.impl.fusing.MapAsync diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala index 18278e22a..f6db22736 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/JmsTxSourceStage.scala @@ -46,7 +46,7 @@ private[jms] final class JmsTxSourceStage(settings: JmsConsumerSettings, destina protected def createSession(connection: jms.Connection, createDestination: jms.Session => javax.jms.Destination) = { val session = connection.createSession(true, settings.acknowledgeMode.getOrElse(AcknowledgeMode.SessionTransacted).mode) - new JmsConsumerSession(connection, session, createDestination(session), destination) + new JmsConsumerSession(connection, session, createDestination(session), graphStageDestination) } protected def pushMessage(msg: TxEnvelope): Unit = push(out, msg) diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala index 7debd190f..ac8266280 100644 --- 
a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/SourceStageLogic.scala @@ -13,28 +13,27 @@ package org.apache.pekko.stream.connectors.jms.impl -import java.util.concurrent.atomic.AtomicBoolean - import org.apache.pekko import pekko.annotation.InternalApi import pekko.stream.connectors.jms.impl.InternalConnectionState.JmsConnectorStopping import pekko.stream.connectors.jms.{ Destination, JmsConsumerSettings } import pekko.stream.scaladsl.Source import pekko.stream.stage.{ OutHandler, StageLogging, TimerGraphStageLogic } -import pekko.stream.{ Attributes, Outlet, SourceShape } +import pekko.stream.{ Attributes, Materializer, Outlet, SourceShape } import pekko.{ Done, NotUsed } +import java.util.concurrent.atomic.AtomicBoolean +import javax.jms import scala.collection.mutable import scala.util.{ Failure, Success } - -import javax.jms +import scala.concurrent.duration.FiniteDuration /** * Internal API. */ @InternalApi private trait JmsConsumerConnector extends JmsConnector[JmsConsumerSession] { - this: TimerGraphStageLogic with StageLogging => + this: TimerGraphStageLogic with GraphStageCompanion with StageLogging => override val startConnection = true @@ -54,8 +53,20 @@ private abstract class SourceStageLogic[T](shape: SourceShape[T], inheritedAttributes: Attributes) extends TimerGraphStageLogic(shape) with JmsConsumerConnector + with GraphStageCompanion with StageLogging { + final override def graphStageMaterializer: Materializer = materializer + + final override def graphStageDestination: Destination = destination + + final override def scheduleOnceOnGraphStage(timerKey: Any, delay: FiniteDuration): Unit = + scheduleOnce(timerKey, delay) + + final override def isTimerActiveOnGraphStage(timerKey: Any): Boolean = isTimerActive(timerKey) + + final override def cancelTimerOnGraphStage(timerKey: Any): Unit = cancelTimer(timerKey) + override protected def jmsSettings: JmsConsumerSettings = settings private val queue = mutable.Queue[T]() private val stopping = new AtomicBoolean(false) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 1a1f851c9..110560467 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -102,6 +102,7 @@ object Dependencies { ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) val AzureStorageQueue = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.microsoft.azure" % "azure-storage" % "8.0.0")) @@ -110,6 +111,7 @@ object Dependencies { val CassandraDriverVersionInDocs = "4.15" val Cassandra = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("com.datastax.oss" % "java-driver-core" % CassandraDriverVersion) .exclude("com.github.spotbugs", "spotbugs-annotations") @@ -118,6 +120,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided)) val Couchbase = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.couchbase.client" % "java-client" % CouchbaseVersion, // ApacheV2 "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", // ApacheV2 @@ -145,7 +148,6 @@ object Dependencies { "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion)) val Elasticsearch = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -186,6 +188,7 @@ object Dependencies { "org.apache.logging.log4j" % 
"log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies) val GoogleCommon = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -195,6 +198,7 @@ object Dependencies { ) ++ Mockito) val GoogleBigQuery = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-jackson" % PekkoHttpVersion % Provided, @@ -220,6 +224,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion) ++ Mockito) val GooglePubSub = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -237,6 +242,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) val GoogleFcm = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion) ++ Mockito) @@ -280,6 +286,7 @@ object Dependencies { "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val HuaweiPushKit = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -298,7 +305,6 @@ object Dependencies { "org.mdedetrich" %% "pekko-http-circe" % "1.0.0")) val Jms = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "javax.jms" % "jms" % "1.1" % Provided, "com.ibm.mq" % "com.ibm.mq.allclient" % "9.2.5.0" % Test, From 1d6a97d0568f8d715b10cde33c4208ef3f2fe28e Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 10 Jun 2023 12:45:41 +0100 Subject: [PATCH 21/90] support ironmq on scala3 (#153) --- .../ironmq/scaladsl/IronMqProducerSpec.scala | 10 +++++----- project/Dependencies.scala | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/scaladsl/IronMqProducerSpec.scala b/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/scaladsl/IronMqProducerSpec.scala index 677593bd2..1e92241b3 100644 --- a/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/scaladsl/IronMqProducerSpec.scala +++ b/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/scaladsl/IronMqProducerSpec.scala @@ -80,11 +80,11 @@ class IronMqProducerSpec extends IronMqSpec { new MockCommittable, new MockCommittable) - whenReady( - messages - .zip(Source(committables)) - .via(atLeastOnceFlow(queue, settings, Flow[Committable].mapAsync(1)(_.commit()))) - .runWith(Sink.ignore)) { _ => + val future: Future[Done] = messages + .zip(Source(committables)) + .via(atLeastOnceFlow(queue, settings, Flow[Committable].mapAsync(1)(_.commit()))) + .runWith(Sink.ignore) + whenReady(future) { _ => committables.forall(_.committed) shouldBe true } } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 110560467..7911b958d 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -298,7 +298,6 @@ object Dependencies { "org.influxdb" % "influxdb-java" % InfluxDBJavaVersion)) val IronMq = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.mdedetrich" %% "pekko-stream-circe" % "1.0.0", From 
8b7a7a5253774084614499344a1adb09e00e9254 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 10 Jun 2023 13:34:21 +0100 Subject: [PATCH 22/90] scala3 support for hdfs (#156) --- project/Dependencies.scala | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 7911b958d..3d2f90082 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -272,11 +272,10 @@ object Dependencies { val HadoopVersion = "3.2.1" val Hdfs = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("org.apache.hadoop" % "hadoop-client" % HadoopVersion).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), - "org.typelevel" %% "cats-core" % "2.0.0", + "slf4j-log4j12"), // ApacheV2 + "org.typelevel" %% "cats-core" % "2.9.0", // MIT, ("org.apache.hadoop" % "hadoop-hdfs" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", "slf4j-log4j12"), ("org.apache.hadoop" % "hadoop-common" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", From c22f80dc93affebe9c1071947aa5a797bb152826 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 10 Jun 2023 11:28:17 +0100 Subject: [PATCH 23/90] support scala3 in mqtt-streaming (#161) Update MqttFlowSpec.scala Update MqttFlowSpec.scala Update MqttFlowSpec.scala Update MqttFlowSpec.scala remove val declarations --- .../connectors/mqtt/streaming/model.scala | 18 +++++++++++------- .../mqtt/streaming/scaladsl/MqttSession.scala | 4 ++-- .../scala/docs/scaladsl/MqttFlowSpec.scala | 17 ++++++++--------- project/Dependencies.scala | 1 - 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala index 64089ba3e..f52addaed 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/model.scala @@ -53,7 +53,8 @@ object ControlPacketType { val DISCONNECT = ControlPacketType(14) val Reserved2 = ControlPacketType(15) } -final case class ControlPacketType private (underlying: Int) extends AnyVal +@InternalApi +final case class ControlPacketType(underlying: Int) extends AnyVal /** * 2.2.2 Flags @@ -75,7 +76,8 @@ object ControlPacketFlags { val RETAIN = ControlPacketFlags(1) } -final case class ControlPacketFlags private (underlying: Int) extends AnyVal { +@InternalApi +final case class ControlPacketFlags(underlying: Int) extends AnyVal { /** * Convenience bitwise OR @@ -110,7 +112,8 @@ case object Reserved2 extends ControlPacket(ControlPacketType.Reserved2, Control * 2.3.1 Packet Identifier * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ -final case class PacketId private (underlying: Int) extends AnyVal +@InternalApi +final case class PacketId(underlying: Int) extends AnyVal object ConnectFlags { val None = ConnectFlags(0) @@ -127,7 +130,7 @@ object ConnectFlags { * 3.1.2.3 Connect Flags * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ -final case class ConnectFlags private (underlying: Int) extends AnyVal { +final case class ConnectFlags private[streaming] (underlying: Int) extends AnyVal { /** * Convenience bitwise OR @@ -223,7 +226,7 @@ object ConnAckFlags { * 3.2.2.1 Connect Acknowledge Flags * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ -final case class 
ConnAckFlags private (underlying: Int) extends AnyVal +final case class ConnAckFlags private[streaming] (underlying: Int) extends AnyVal object ConnAckReturnCode { val ConnectionAccepted = ConnAckReturnCode(0) @@ -238,7 +241,7 @@ object ConnAckReturnCode { * 3.2.2.3 Connect Return code * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ -final case class ConnAckReturnCode private (underlying: Int) extends AnyVal { +final case class ConnAckReturnCode private[streaming] (underlying: Int) extends AnyVal { /** * Convenience bitwise OR @@ -286,7 +289,8 @@ object Publish { * 3.3 PUBLISH – Publish message * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html */ -final case class Publish @InternalApi private[streaming] (override val flags: ControlPacketFlags, +@InternalApi +final case class Publish(override val flags: ControlPacketFlags, topicName: String, packetId: Option[PacketId], payload: ByteString) diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/MqttSession.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/MqttSession.scala index afbfc222e..111eb7f42 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/MqttSession.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/scaladsl/MqttSession.scala @@ -169,7 +169,7 @@ final class ActorMqttClientSession(settings: MqttSessionSettings)(implicit syste import MqttSession._ import system.dispatcher - private implicit val loggingAdapter: LoggingAdapter = Logging(system, this.getClass) + private implicit val loggingAdapter: LoggingAdapter = Logging(system, classOf[ActorMqttClientSession]) override def ![A](cp: Command[A]): Unit = cp match { case Command(cp: Publish, _, carry) => @@ -504,7 +504,7 @@ final class ActorMqttServerSession(settings: MqttSessionSettings)(implicit syste import MqttSession._ import system.dispatcher - private implicit val loggingAdapter: LoggingAdapter = Logging(system, this.getClass) + private implicit val loggingAdapter: LoggingAdapter = Logging(system, classOf[ActorMqttClientSession]) override def ![A](cp: Command[A]): Unit = cp match { case Command(cp: Publish, _, carry) => diff --git a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttFlowSpec.scala b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttFlowSpec.scala index 190dc0afa..fd73bfa50 100644 --- a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttFlowSpec.scala +++ b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttFlowSpec.scala @@ -36,20 +36,17 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike class UntypedMqttFlowSpec - extends ParametrizedTestKit("untyped-flow-spec/flow", + extends MqttFlowSpecBase("untyped-flow-spec/flow", "untyped-flow-spec/topic1", ActorSystem("UntypedMqttFlowSpec")) - with MqttFlowSpec + class TypedMqttFlowSpec - extends ParametrizedTestKit("typed-flow-spec/flow", + extends MqttFlowSpecBase("typed-flow-spec/flow", "typed-flow-spec/topic1", - org.apache.pekko.actor.typed.ActorSystem(Behaviors.ignore, "TypedMqttFlowSpec").toClassic) - with MqttFlowSpec - -class ParametrizedTestKit(val clientId: String, val topic: String, system: ActorSystem) extends TestKit(system) + pekko.actor.typed.ActorSystem(Behaviors.ignore, "TypedMqttFlowSpec").toClassic) -trait MqttFlowSpec extends AnyWordSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures with LogCapturing { - self: ParametrizedTestKit 
=> +abstract class MqttFlowSpecBase(clientId: String, topic: String, system: ActorSystem) extends TestKit(system) + with AnyWordSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures with LogCapturing { override def sourceActorSytem = Some(system.name) @@ -57,6 +54,8 @@ trait MqttFlowSpec extends AnyWordSpecLike with Matchers with BeforeAndAfterAll private implicit val dispatcherExecutionContext: ExecutionContext = system.dispatcher + private implicit val implicitSystem: ActorSystem = system + implicit val logAdapter: LoggingAdapter = Logging(system, this.getClass.getName) override def afterAll(): Unit = diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 3d2f90082..dabba7361 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -345,7 +345,6 @@ object Dependencies { "org.eclipse.paho" % "org.eclipse.paho.client.mqttv3" % "1.2.5")) val MqttStreaming = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-actor-typed" % PekkoVersion, "org.apache.pekko" %% "pekko-actor-testkit-typed" % PekkoVersion % Test, From 54f3c4b68023ee682cd44fd5b84724d424b9abc6 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 10 Jun 2023 15:19:46 +0100 Subject: [PATCH 24/90] support scala3 on unix-domain connector (#162) format --- project/Dependencies.scala | 1 - .../unixdomainsocket/impl/UnixDomainSocketImpl.scala | 7 ++++--- .../unixdomainsocket/scaladsl/UnixDomainSocket.scala | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index dabba7361..1b4f326d9 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -450,7 +450,6 @@ object Dependencies { "org.apache.pekko" %% "pekko-http-testkit" % PekkoHttpVersion % Test)) val UnixDomainSocket = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.github.jnr" % "jffi" % "1.3.1", // classifier "complete", // Is the classifier needed anymore? 
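// ----------------------------------------------------------------------------
// Editor's note (annotation, not part of the patch): the mqtt-streaming change
// above drops or widens the `private` constructors on small value case classes
// such as ControlPacketType, ControlPacketFlags and PacketId. The likely reason,
// stated here as an assumption: Scala 3 gives the synthesized companion
// `apply`/`copy` the same access level as the constructor, so call sites outside
// the companion stop compiling. A minimal illustrative sketch with hypothetical
// names (not code from the connector):
final case class PacketIdLike private (underlying: Int) extends AnyVal
object PacketIdLike {
  val First: PacketIdLike = PacketIdLike(1) // fine: the companion sees the private constructor
}
// val elsewhere = PacketIdLike(2)          // Scala 3: `apply` is private here, does not compile
// Widening to `private[streaming]`, or removing `private` and marking the type
// `@InternalApi`, keeps existing callers compiling under Scala 3.
// ----------------------------------------------------------------------------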
"com.github.jnr" % "jnr-unixsocket" % "0.38.5")) diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala index d5c7020b0..c2737e94c 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala @@ -17,7 +17,7 @@ package impl import org.apache.pekko import pekko.actor.{ Cancellable, CoordinatedShutdown, ExtendedActorSystem, Extension } import pekko.annotation.InternalApi -import pekko.event.{ Logging, LoggingAdapter } +import pekko.event.{ LogSource, Logging, LoggingAdapter } import pekko.stream._ import pekko.stream.connectors.unixdomainsocket.scaladsl.UnixDomainSocket.{ IncomingConnection, @@ -371,9 +371,10 @@ private[unixdomainsocket] abstract class UnixDomainSocketImpl(system: ExtendedAc private val sel = NativeSelectorProvider.getInstance.openSelector /** Override to customise reported log source */ - protected def logSource: Class[_] = this.getClass + protected def logSource: Class[_] = getClass - private val ioThread = new Thread(() => nioEventLoop(sel, Logging(system, logSource)), "unix-domain-socket-io") + private val ioThread = + new Thread(() => nioEventLoop(sel, Logging(system, logSource.getName)), "unix-domain-socket-io") ioThread.start() CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseServiceStop, "stopUnixDomainSocket") { () => diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala index 7927d922b..5357bdaf5 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala @@ -182,5 +182,5 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends UnixDomainSock * for example using the [[pekko.stream.scaladsl.Framing]] stages. 
*/ def outgoingConnection(path: Path): Flow[ByteString, ByteString, Future[OutgoingConnection]] = - super.outgoingConnection(UnixSocketAddress(path)) + super.outgoingConnection(UnixSocketAddress(path), None, true, Duration.Inf) } From 34d15f589e5974e19cc81940f639004fdba59c3e Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 10 Jun 2023 17:21:28 +0100 Subject: [PATCH 25/90] support scala3 for google-cloud-storage connector (#159) * support scala3 for google-cloud-storage connector fix import Update Formats.scala Update Dependencies.scala * try to fix code that was somehow reverted in a merge * more changes * Update GCStorageStream.scala * clock implicits * another merge issue --- .../scaladsl/GrpcBigQueryStorageReader.scala | 2 +- .../scala/docs/scaladsl/ExampleReader.scala | 2 +- .../test/scala/docs/scaladsl/ExampleApp.scala | 2 +- .../googlecloud/pubsub/impl/PubSubApi.scala | 34 ++++++----- .../googlecloud/storage/impl/Formats.scala | 14 +++-- .../storage/impl/GCStorageStream.scala | 5 +- .../storage/WithMaterializerGlobal.scala | 2 +- .../connectors/google/ResumableUpload.scala | 58 +++++++++---------- .../google/auth/GoogleOAuth2Spec.scala | 2 +- .../google/auth/OAuth2CredentialsSpec.scala | 2 +- .../firebase/fcm/v1/impl/FcmSenderSpec.scala | 2 +- project/Dependencies.scala | 5 -- 12 files changed, 66 insertions(+), 64 deletions(-) diff --git a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala index d2d541424..19370175f 100644 --- a/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala +++ b/google-cloud-bigquery-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/storage/scaladsl/GrpcBigQueryStorageReader.scala @@ -44,7 +44,7 @@ object GrpcBigQueryStorageReader { * An extension that manages a single gRPC scala reader client per actor system. 
*/ final class GrpcBigQueryStorageReaderExt private (sys: ExtendedActorSystem) extends Extension { - implicit val reader = GrpcBigQueryStorageReader()(sys) + implicit val reader: GrpcBigQueryStorageReader = GrpcBigQueryStorageReader()(sys) } object GrpcBigQueryStorageReaderExt extends ExtensionId[GrpcBigQueryStorageReaderExt] with ExtensionIdProvider { diff --git a/google-cloud-bigquery-storage/src/test/scala/docs/scaladsl/ExampleReader.scala b/google-cloud-bigquery-storage/src/test/scala/docs/scaladsl/ExampleReader.scala index 268cdb330..92a6d74d6 100644 --- a/google-cloud-bigquery-storage/src/test/scala/docs/scaladsl/ExampleReader.scala +++ b/google-cloud-bigquery-storage/src/test/scala/docs/scaladsl/ExampleReader.scala @@ -40,7 +40,7 @@ import scala.concurrent.Future class ExampleReader { - implicit val sys = ActorSystem("ExampleReader") + implicit val sys: ActorSystem = ActorSystem("ExampleReader") // #read-all val sourceOfSources: Source[(ReadSession.Schema, Seq[Source[ReadRowsResponse.Rows, NotUsed]]), Future[NotUsed]] = diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala index 3ff4851c6..d76e43398 100644 --- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala +++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/ExampleApp.scala @@ -37,7 +37,7 @@ object ExampleApp { |pekko.loglevel = INFO """.stripMargin) - implicit val sys = ActorSystem("ExampleApp", config) + implicit val sys: ActorSystem = ActorSystem("ExampleApp", config) import sys.dispatcher diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala index 696406f89..90dd33581 100644 --- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala @@ -69,7 +69,7 @@ private[pubsub] trait PubSubApi { def PubSubGoogleApisPort: Int def isEmulated: Boolean - private implicit val instantFormat = new RootJsonFormat[Instant] { + private implicit val instantFormat: RootJsonFormat[Instant] = new RootJsonFormat[Instant] { override def read(jsValue: JsValue): Instant = jsValue match { case JsString(time) => Instant.parse(time) case _ => deserializationError("Instant required as a string of RFC3339 UTC Zulu format.") @@ -77,7 +77,7 @@ private[pubsub] trait PubSubApi { override def write(instant: Instant): JsValue = JsString(instant.toString) } - private implicit val pubSubMessageFormat = + private implicit val pubSubMessageFormat: RootJsonFormat[PubSubMessage] = new RootJsonFormat[PubSubMessage] { override def read(json: JsValue): PubSubMessage = { val fields = json.asJsObject.fields @@ -98,7 +98,7 @@ private[pubsub] trait PubSubApi { ++ m.attributes.map(attributes => "attributes" -> attributes.toJson): _*) } - private implicit val publishMessageFormat = new RootJsonFormat[PublishMessage] { + private implicit val publishMessageFormat: RootJsonFormat[PublishMessage] = new RootJsonFormat[PublishMessage] { def read(json: JsValue): PublishMessage = { val data = json.asJsObject.fields("data").convertTo[String] val attributes = json.asJsObject.fields("attributes").convertTo[immutable.Map[String, String]] @@ -112,37 +112,39 @@ private[pubsub] trait PubSubApi { 
m.attributes.map(a => "attributes" -> a.toJson): _*) } - private implicit val pubSubRequestFormat = new RootJsonFormat[PublishRequest] { + private implicit val pubSubRequestFormat: RootJsonFormat[PublishRequest] = new RootJsonFormat[PublishRequest] { def read(json: JsValue): PublishRequest = PublishRequest(json.asJsObject.fields("messages").convertTo[immutable.Seq[PublishMessage]]) def write(pr: PublishRequest): JsValue = JsObject("messages" -> pr.messages.toJson) } - private implicit val gcePubSubResponseFormat = new RootJsonFormat[PublishResponse] { + private implicit val gcePubSubResponseFormat: RootJsonFormat[PublishResponse] = new RootJsonFormat[PublishResponse] { def read(json: JsValue): PublishResponse = PublishResponse(json.asJsObject.fields("messageIds").convertTo[immutable.Seq[String]]) def write(pr: PublishResponse): JsValue = JsObject("messageIds" -> pr.messageIds.toJson) } - private implicit val receivedMessageFormat = new RootJsonFormat[ReceivedMessage] { + private implicit val receivedMessageFormat: RootJsonFormat[ReceivedMessage] = new RootJsonFormat[ReceivedMessage] { def read(json: JsValue): ReceivedMessage = ReceivedMessage(json.asJsObject.fields("ackId").convertTo[String], json.asJsObject.fields("message").convertTo[PubSubMessage]) def write(rm: ReceivedMessage): JsValue = JsObject("ackId" -> rm.ackId.toJson, "message" -> rm.message.toJson) } - private implicit val pubSubPullResponseFormat = new RootJsonFormat[PullResponse] { + private implicit val pubSubPullResponseFormat: RootJsonFormat[PullResponse] = new RootJsonFormat[PullResponse] { def read(json: JsValue): PullResponse = PullResponse(json.asJsObject.fields.get("receivedMessages").map(_.convertTo[immutable.Seq[ReceivedMessage]])) def write(pr: PullResponse): JsValue = pr.receivedMessages.map(rm => JsObject("receivedMessages" -> rm.toJson)).getOrElse(JsObject.empty) } - private implicit val acknowledgeRequestFormat = new RootJsonFormat[AcknowledgeRequest] { - def read(json: JsValue): AcknowledgeRequest = - AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) - def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) - } - private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest.apply) + private implicit val acknowledgeRequestFormat: RootJsonFormat[AcknowledgeRequest] = + new RootJsonFormat[AcknowledgeRequest] { + def read(json: JsValue): AcknowledgeRequest = + AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*) + def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson) + } + private implicit val pullRequestFormat: RootJsonFormat[PullRequest] = + DefaultJsonProtocol.jsonFormat2(PullRequest.apply) private def scheme: String = if (isEmulated) "http" else "https" @@ -173,7 +175,7 @@ private[pubsub] trait PubSubApi { .mapMaterializedValue(_ => NotUsed) private implicit val pullResponseUnmarshaller: FromResponseUnmarshaller[PullResponse] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case StatusCodes.Success(_) if response.entity.contentType == ContentTypes.`application/json` => Unmarshal(response.entity).to[PullResponse] @@ -211,7 +213,7 @@ private[pubsub] trait PubSubApi { .mapMaterializedValue(_ => NotUsed) private implicit val acknowledgeResponseUnmarshaller: FromResponseUnmarshaller[Done] = - 
Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case StatusCodes.Success(_) => response.discardEntityBytes().future @@ -261,7 +263,7 @@ private[pubsub] trait PubSubApi { publish(topic, parallelism, None) private implicit val publishResponseUnmarshaller: FromResponseUnmarshaller[PublishResponse] = - Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case StatusCodes.Success(_) if response.entity.contentType == ContentTypes.`application/json` => Unmarshal(response.entity).to[PublishResponse] diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala index 5c20c3931..70a4306dc 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala @@ -18,7 +18,7 @@ import java.time.OffsetDateTime import org.apache.pekko import pekko.http.scaladsl.model.{ ContentType, ContentTypes } import pekko.stream.connectors.googlecloud.storage._ -import spray.json.{ DefaultJsonProtocol, JsObject, JsValue, RootJsonFormat, RootJsonReader } +import spray.json.{ enrichAny, DefaultJsonProtocol, JsObject, JsValue, RootJsonFormat, RootJsonReader } import scala.util.Try @@ -49,7 +49,8 @@ object Formats extends DefaultJsonProtocol { domain: String, projectTeam: ProjectTeam, etag: String) - private implicit val ObjectAccessControlsJsonFormat = jsonFormat13(ObjectAccessControls) + private implicit val ObjectAccessControlsJsonFormat: RootJsonFormat[ObjectAccessControls] = + jsonFormat13(ObjectAccessControls.apply) /** * Google API storage response object @@ -79,7 +80,8 @@ object Formats extends DefaultJsonProtocol { timeStorageClassUpdated: String, updated: String) - private implicit val storageObjectReadOnlyJson = jsonFormat18(StorageObjectReadOnlyJson) + private implicit val storageObjectReadOnlyJson: RootJsonFormat[StorageObjectReadOnlyJson] = + jsonFormat18(StorageObjectReadOnlyJson.apply) // private sub class of StorageObjectJson used to workaround 22 field jsonFormat issue private final case class StorageObjectWriteableJson( @@ -98,7 +100,8 @@ object Formats extends DefaultJsonProtocol { temporaryHold: Option[Boolean], acl: Option[List[ObjectAccessControls]]) - private implicit val storageObjectWritableJson = jsonFormat14(StorageObjectWriteableJson) + private implicit val storageObjectWritableJson: RootJsonFormat[StorageObjectWriteableJson] = + jsonFormat14(StorageObjectWriteableJson.apply) private implicit object StorageObjectJsonFormat extends RootJsonFormat[StorageObjectJson] { override def read(value: JsValue): StorageObjectJson = { @@ -175,7 +178,8 @@ object Formats extends DefaultJsonProtocol { } } - private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson.apply) + private implicit val bucketListResultJsonReads: RootJsonFormat[BucketListResultJson] = + jsonFormat4(BucketListResultJson.apply) implicit object RewriteResponseReads extends RootJsonReader[RewriteResponse] { override def read(json: JsValue): RewriteResponse = { diff --git 
a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala index b67496ea3..63d55bd4b 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala @@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.googlecloud.storage.impl import org.apache.pekko +import pekko.actor.ActorSystem import pekko.annotation.InternalApi import pekko.dispatch.ExecutionContexts import pekko.dispatch.ExecutionContexts.parasitic @@ -289,7 +290,7 @@ import scala.concurrent.{ ExecutionContext, Future } @nowarn("msg=deprecated") private def resolveSettings(mat: Materializer, attr: Attributes) = { - implicit val sys = mat.system + implicit val sys: ActorSystem = mat.system val legacySettings = attr .get[GCStorageSettingsValue] .map(_.settings) @@ -334,7 +335,7 @@ import scala.concurrent.{ ExecutionContext, Future } } private def resolveGCSSettings(mat: Materializer, attr: Attributes): GCSSettings = { - implicit val sys = mat.system + implicit val sys: ActorSystem = mat.system attr .get[GCSSettingsValue] .map(_.settings) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala index eb046720a..787885b91 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala @@ -29,7 +29,7 @@ trait WithMaterializerGlobal with ScalaFutures with IntegrationPatience with Matchers { - implicit val actorSystem = ActorSystem("test") + implicit val actorSystem: ActorSystem = ActorSystem("test") implicit val ec: ExecutionContext = actorSystem.dispatcher override protected def afterAll(): Unit = { diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index f50c921b9..8dbc91cc2 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -56,7 +56,7 @@ private[connectors] object ResumableUpload { Sink .fromMaterializer { (mat, attr) => import mat.executionContext - implicit val materializer = mat + implicit val materializer: Materializer = mat implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr) val uploadChunkSize = settings.requestSettings.uploadChunkSize @@ -96,25 +96,24 @@ private[connectors] object ResumableUpload { private def initiateSession(request: HttpRequest)(implicit mat: Materializer, settings: GoogleSettings): Future[Uri] = { - implicit val system: ActorSystem = mat.system import implicits._ - implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => - response.discardEntityBytes().future.map { _ => - response.header[Location].fold(throw InvalidResponseException(ErrorInfo("No Location 
header")))(_.uri) - } - }.withDefaultRetry + implicit val um: FromResponseUnmarshaller[Uri] = + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + response.discardEntityBytes().future.map { _ => + response.header[Location].fold(throw InvalidResponseException(ErrorInfo("No Location header")))(_.uri) + } + }.withDefaultRetry - GoogleHttp().singleAuthenticatedRequest[Uri](request) + GoogleHttp(mat.system).singleAuthenticatedRequest[Uri](request) } private final case class DoNotRetry(ex: Throwable) extends Throwable(ex) with NoStackTrace private def uploadChunk[T: FromResponseUnmarshaller]( request: HttpRequest)(implicit mat: Materializer): Flow[Either[T, MaybeLast[Chunk]], Try[Option[T]], NotUsed] = { - implicit val system: ActorSystem = mat.system - val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => + val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => response.status match { case PermanentRedirect => response.discardEntityBytes().future.map(_ => None) @@ -127,7 +126,8 @@ private[connectors] object ResumableUpload { val uri = request.uri Flow[HttpRequest] .map((_, ())) - .via(GoogleHttp().cachedHostConnectionPoolWithContext(uri.authority.host.address, uri.effectivePort)(um)) + .via(GoogleHttp(mat.system).cachedHostConnectionPoolWithContext(uri.authority.host.address, uri.effectivePort)( + um)) .map(_._1.recoverWith { case DoNotRetry(ex) => Failure(ex) }) } @@ -147,30 +147,30 @@ private[connectors] object ResumableUpload { request: HttpRequest, chunk: Future[MaybeLast[Chunk]])( implicit mat: Materializer, settings: GoogleSettings): Future[Either[T, MaybeLast[Chunk]]] = { - implicit val system: ActorSystem = mat.system import implicits._ - implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse => - response.status match { - case OK | Created => Unmarshal(response).to[T].map(Left(_)) - case PermanentRedirect => - response.discardEntityBytes().future.map { _ => - Right( - response - .header[Range] - .flatMap(_.ranges.headOption) - .collect { - case Slice(_, last) => last + 1 - }.getOrElse(0L)) - } - case _ => throw InvalidResponseException(ErrorInfo(response.status.value, response.status.defaultMessage)) - } - }.withDefaultRetry + implicit val um: FromResponseUnmarshaller[Either[T, Long]] = + Unmarshaller.withMaterializer { implicit ec => implicit mat => (response: HttpResponse) => + response.status match { + case OK | Created => Unmarshal(response).to[T].map(Left(_)) + case PermanentRedirect => + response.discardEntityBytes().future.map { _ => + Right( + response + .header[Range] + .flatMap(_.ranges.headOption) + .collect { + case Slice(_, last) => last + 1 + }.getOrElse(0L)) + } + case _ => throw InvalidResponseException(ErrorInfo(response.status.value, response.status.defaultMessage)) + } + }.withDefaultRetry import mat.executionContext chunk.flatMap { case maybeLast @ MaybeLast(Chunk(bytes, position)) => - GoogleHttp() + GoogleHttp(mat.system) .singleAuthenticatedRequest[Either[T, Long]](request.addHeader(statusRequestHeader)) .map { case Left(result) if maybeLast.isLast => Left(result) diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala index 1bd56b790..753e3004a 100644 --- 
a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala @@ -46,7 +46,7 @@ class GoogleOAuth2Spec implicit val executionContext: ExecutionContext = system.dispatcher implicit val settings: GoogleSettings = GoogleSettings(system) - implicit val clock = Clock.systemUTC() + implicit val clock: Clock = Clock.systemUTC() lazy val privateKey = { val inputStream = getClass.getClassLoader.getResourceAsStream("private_pcks8.pem") diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala index 7d7e2342a..446336dd9 100644 --- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala +++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala @@ -45,7 +45,7 @@ class OAuth2CredentialsSpec import system.dispatcher implicit val settings: RequestSettings = GoogleSettings().requestSettings - implicit val clock = Clock.systemUTC() + implicit val clock: Clock = Clock.systemUTC() final object AccessTokenProvider { @volatile var accessTokenPromise: Promise[AccessToken] = Promise.failed(new RuntimeException) diff --git a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala index a2d4d4922..ca6c18820 100644 --- a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala +++ b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala @@ -57,7 +57,7 @@ class FcmSenderSpec implicit val executionContext: ExecutionContext = system.dispatcher - implicit val conf = FcmSettings() + implicit val conf: FcmSettings = FcmSettings() implicit val settings: GoogleSettings = GoogleSettings().copy(projectId = "projectId") "FcmSender" should { diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 1b4f326d9..85f5d8177 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -188,7 +188,6 @@ object Dependencies { "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies) val GoogleCommon = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -198,7 +197,6 @@ object Dependencies { ) ++ Mockito) val GoogleBigQuery = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-jackson" % PekkoHttpVersion % Provided, @@ -224,7 +222,6 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion) ++ Mockito) val GooglePubSub = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -242,13 +239,11 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) val GoogleFcm = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, 
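// ----------------------------------------------------------------------------
// Editor's note (annotation, not part of the patch): a pattern repeated across
// the google-* changes above is adding an explicit result type to every
// `implicit val`/`implicit def` (the spray-json formats in PubSubApi, the
// unmarshallers in ResumableUpload, the Grpc* extension members, the clocks and
// settings in the test specs). Scala 3 is moving towards requiring explicit
// types on implicit definitions (newer source levels warn when they are
// missing), and an explicit type also keeps the inferred shape stable across
// compiler upgrades. A before/after sketch using a format that does appear in
// the patch:
//   // before: the RootJsonFormat[PullRequest] type was inferred
//   private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest.apply)
//   // after: the type is spelled out
//   private implicit val pullRequestFormat: RootJsonFormat[PullRequest] =
//     DefaultJsonProtocol.jsonFormat2(PullRequest.apply)
// ----------------------------------------------------------------------------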
"org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion) ++ Mockito) val GoogleStorage = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, From f5fbaebd5957f86693dbc8d4d32d88423159526b Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 11 Jun 2023 08:41:40 +0100 Subject: [PATCH 26/90] support scala3 for amqp connector (#154) format --- .../amqp/AmqpConnectionProvider.scala | 54 ++++++++++--------- project/Dependencies.scala | 1 - 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala index 2b0dc6704..c6a3ca303 100644 --- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala +++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/AmqpConnectionProvider.scala @@ -375,12 +375,14 @@ final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionPr def withAutomaticRelease(automaticRelease: Boolean): AmqpCachedConnectionProvider = copy(automaticRelease = automaticRelease) + override def get: Connection = getRecursive(provider) + @tailrec - override def get: Connection = state.get match { + private def getRecursive(amqpConnectionProvider: AmqpConnectionProvider): Connection = state.get match { case Empty => if (state.compareAndSet(Empty, Connecting)) { try { - val connection = provider.get + val connection = amqpConnectionProvider.get if (!state.compareAndSet(Connecting, Connected(connection, 1))) throw new ConcurrentModificationException( "Unexpected concurrent modification while creating the connection.") @@ -391,34 +393,38 @@ final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionPr state.compareAndSet(Connecting, Empty) throw e } - } else get - case Connecting => get + } else getRecursive(amqpConnectionProvider) + case Connecting => getRecursive(amqpConnectionProvider) case c @ Connected(connection, clients) => if (state.compareAndSet(c, Connected(connection, clients + 1))) connection - else get - case Closing => get + else getRecursive(amqpConnectionProvider) + case Closing => getRecursive(amqpConnectionProvider) } + override def release(connection: Connection): Unit = releaseRecursive(provider, connection) + @tailrec - override def release(connection: Connection): Unit = state.get match { - case Empty => throw new IllegalStateException("There is no connection to release.") - case Connecting => release(connection) - case c @ Connected(cachedConnection, clients) => - if (cachedConnection != connection) - throw new IllegalArgumentException("Can't release a connection that's not owned by this provider") - - if (clients == 1 || !automaticRelease) { - if (state.compareAndSet(c, Closing)) { - provider.release(connection) - if (!state.compareAndSet(Closing, Empty)) - throw new ConcurrentModificationException( - "Unexpected concurrent modification while closing the connection.") + private def releaseRecursive(amqpConnectionProvider: AmqpConnectionProvider, connection: Connection): Unit = + state.get match { + case Empty => throw new IllegalStateException("There is no connection to release.") + case Connecting => releaseRecursive(amqpConnectionProvider, connection) + case c @ Connected(cachedConnection, clients) => + if (cachedConnection != connection) + throw new 
IllegalArgumentException("Can't release a connection that's not owned by this provider") + + if (clients == 1 || !automaticRelease) { + if (state.compareAndSet(c, Closing)) { + amqpConnectionProvider.release(connection) + if (!state.compareAndSet(Closing, Empty)) + throw new ConcurrentModificationException( + "Unexpected concurrent modification while closing the connection.") + } + } else { + if (!state.compareAndSet(c, Connected(cachedConnection, clients - 1))) + releaseRecursive(amqpConnectionProvider, connection) } - } else { - if (!state.compareAndSet(c, Connected(cachedConnection, clients - 1))) release(connection) - } - case Closing => release(connection) - } + case Closing => releaseRecursive(amqpConnectionProvider, connection) + } private def copy(automaticRelease: Boolean): AmqpCachedConnectionProvider = new AmqpCachedConnectionProvider(provider, automaticRelease) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 85f5d8177..90e79dd29 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -88,7 +88,6 @@ object Dependencies { "com.fasterxml.jackson.core" % "jackson-databind" % JacksonDatabindVersion) val Amqp = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.rabbitmq" % "amqp-client" % "5.14.2") ++ Mockito) From 22332a085aebc8341e5edad49c36202d92a04127 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 11 Jun 2023 12:20:34 +0100 Subject: [PATCH 27/90] enable scala3 build for more google connectors (#165) * enable scala3 build for more google connectors * implicits * disable part of scala3 build * try full build again * Update GooglePubSub.scala * refactor suggested by @mdedetrich * remove interim variables * add comments * Update GooglePubSub.scala --- .../pubsub/grpc/javadsl/GooglePubSub.scala | 12 ++++++++++-- .../pubsub/grpc/javadsl/GrpcPublisher.scala | 2 +- .../pubsub/grpc/javadsl/GrpcSubscriber.scala | 2 +- .../pubsub/grpc/scaladsl/GrpcPublisher.scala | 2 +- .../pubsub/grpc/scaladsl/GrpcSubscriber.scala | 2 +- .../test/scala/docs/scaladsl/IntegrationSpec.scala | 2 +- .../stream/connectors/google/ResumableUpload.scala | 1 - project/Dependencies.scala | 2 -- 8 files changed, 15 insertions(+), 10 deletions(-) diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GooglePubSub.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GooglePubSub.scala index 930fa2bc7..b254474f0 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GooglePubSub.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GooglePubSub.scala @@ -69,7 +69,11 @@ object GooglePubSub { Source .tick(Duration.ZERO, pollInterval, subsequentRequest) .mapMaterializedValue(cancellable.complete(_)))) - .mapConcat(_.getReceivedMessagesList) + .mapConcat( + // TODO uptake any fix suggested for https://contributors.scala-lang.org/t/better-type-inference-for-scala-send-us-your-problematic-cases/2410/183 + ((response: StreamingPullResponse) => + response.getReceivedMessagesList): pekko.japi.function.Function[StreamingPullResponse, + java.util.List[ReceivedMessage]]) .mapMaterializedValue(_ => cancellable) } .mapMaterializedValue(flattenCs(_)) @@ -95,7 +99,11 @@ object GooglePubSub { Source .tick(Duration.ZERO, pollInterval, request) .mapAsync(1, client.pull(_)) - .mapConcat(_.getReceivedMessagesList) 
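// ----------------------------------------------------------------------------
// Editor's note (annotation, not part of the patch): in the Java DSL source
// touched above and below, `mapConcat` takes a
// `pekko.japi.function.Function[T, java.util.List[U]]`. Per the TODO and the
// linked type-inference discussion, Scala 3 apparently cannot infer that SAM
// type from the bare lambda `_.getReceivedMessagesList`, so the patch ascribes
// the lambda with the full functional-interface type. Generic shape of the
// workaround (illustrative only):
//   source.mapConcat(
//     ((response: PullResponse) => response.getReceivedMessagesList)
//       : pekko.japi.function.Function[PullResponse, java.util.List[ReceivedMessage]])
// ----------------------------------------------------------------------------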
+ .mapConcat( + // TODO uptake any fix suggested for https://contributors.scala-lang.org/t/better-type-inference-for-scala-send-us-your-problematic-cases/2410/183 + ((response: PullResponse) => + response.getReceivedMessagesList): pekko.japi.function.Function[PullResponse, + java.util.List[ReceivedMessage]]) .mapMaterializedValue(cancellable.complete(_)) .mapMaterializedValue(_ => cancellable) } diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala index 5dacfc18a..9fd70656a 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcPublisher.scala @@ -74,7 +74,7 @@ object GrpcPublisher { * An extension that manages a single gRPC java publisher client per actor system. */ final class GrpcPublisherExt private (sys: ExtendedActorSystem) extends Extension { - implicit val publisher = GrpcPublisher.create(sys) + implicit val publisher: GrpcPublisher = GrpcPublisher.create(sys) } object GrpcPublisherExt extends ExtensionId[GrpcPublisherExt] with ExtensionIdProvider { diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala index e66f95aac..de2e738ad 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/javadsl/GrpcSubscriber.scala @@ -75,7 +75,7 @@ object GrpcSubscriber { * An extension that manages a single gRPC java subscriber client per actor system. */ final class GrpcSubscriberExt private (sys: ExtendedActorSystem) extends Extension { - implicit val subscriber = GrpcSubscriber.create(sys) + implicit val subscriber: GrpcSubscriber = GrpcSubscriber.create(sys) } object GrpcSubscriberExt extends ExtensionId[GrpcSubscriberExt] with ExtensionIdProvider { diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala index 896cd60c6..286c6d78e 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcPublisher.scala @@ -65,7 +65,7 @@ object GrpcPublisher { * An extension that manages a single gRPC scala publisher client per actor system. 
*/ final class GrpcPublisherExt private (sys: ExtendedActorSystem) extends Extension { - implicit val publisher = GrpcPublisher(sys: ActorSystem) + implicit val publisher: GrpcPublisher = GrpcPublisher(sys: ActorSystem) } object GrpcPublisherExt extends ExtensionId[GrpcPublisherExt] with ExtensionIdProvider { diff --git a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala index 955a3ed1b..0d70d861d 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/grpc/scaladsl/GrpcSubscriber.scala @@ -65,7 +65,7 @@ object GrpcSubscriber { * An extension that manages a single gRPC scala subscriber client per actor system. */ final class GrpcSubscriberExt private (sys: ExtendedActorSystem) extends Extension { - implicit val subscriber = GrpcSubscriber(sys: ActorSystem) + implicit val subscriber: GrpcSubscriber = GrpcSubscriber(sys: ActorSystem) } object GrpcSubscriberExt extends ExtensionId[GrpcSubscriberExt] with ExtensionIdProvider { diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala index 38eba06e4..fcea88765 100644 --- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala +++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala @@ -52,7 +52,7 @@ class IntegrationSpec implicit val system: ActorSystem = ActorSystem("IntegrationSpec") - implicit val defaultPatience = PatienceConfig(timeout = 15.seconds, interval = 50.millis) + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 15.seconds, interval = 50.millis) "connector" should { diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala index 8dbc91cc2..969cef4c1 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala @@ -14,7 +14,6 @@ package org.apache.pekko.stream.connectors.google import org.apache.pekko -import pekko.actor.ActorSystem import pekko.NotUsed import pekko.annotation.InternalApi import pekko.http.scaladsl.model.HttpMethods.{ POST, PUT } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 90e79dd29..ca8286650 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -205,7 +205,6 @@ object Dependencies { "com.fasterxml.jackson.datatype" % "jackson-datatype-jsr310" % JacksonDatabindVersion % Test, "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val GoogleBigQueryStorage = Seq( - crossScalaVersions -= Scala3, // see Pekko gRPC version in plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-bigquerystorage/tree/master/proto-google-cloud-bigquerystorage-v1 @@ -227,7 +226,6 @@ object Dependencies { "com.github.tomakehurst" % "wiremock" % "2.27.2" % Test) ++ Mockito) val GooglePubSubGrpc = Seq( - crossScalaVersions -= Scala3, // see Pekko gRPC version in plugins.sbt libraryDependencies ++= 
Seq( // https://github.com/googleapis/java-pubsub/tree/master/proto-google-cloud-pubsub-v1/ From 02fe3197778150e7b5cdf5bb9a78ff575b6d2b39 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 11 Jun 2023 14:24:27 +0100 Subject: [PATCH 28/90] Scala3 ftp support (#170) * support scala3 in ftp connector * Update BaseSpec.scala --- .../connectors/ftp/impl/FtpBrowserGraphStage.scala | 6 +++--- .../impl/FtpDirectoryOperationsGraphStage.scala | 2 +- .../connectors/ftp/impl/FtpGraphStageLogic.scala | 10 +++++----- .../connectors/ftp/impl/FtpIOGraphStage.scala | 14 +++++++------- .../pekko/stream/connectors/ftp/impl/FtpLike.scala | 13 ++++++++----- .../stream/connectors/ftp/impl/FtpOperations.scala | 2 +- .../connectors/ftp/impl/FtpsOperations.scala | 2 +- .../connectors/ftp/impl/SftpOperations.scala | 2 +- .../stream/connectors/ftp/javadsl/FtpApi.scala | 4 ++-- .../stream/connectors/ftp/scaladsl/FtpApi.scala | 4 ++-- project/Dependencies.scala | 1 - 11 files changed, 31 insertions(+), 29 deletions(-) diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala index 769156909..589624aa4 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpBrowserGraphStage.scala @@ -31,7 +31,7 @@ private[ftp] trait FtpBrowserGraphStage[FtpClient, S <: RemoteFileSettings] def emitTraversedDirectories: Boolean = false - def createLogic(inheritedAttributes: Attributes) = { + def createLogic(inheritedAttributes: Attributes): FtpGraphStageLogic[FtpFile, FtpClient, S] = { val logic = new FtpGraphStageLogic[FtpFile, FtpClient, S](shape, ftpLike, connectionSettings, ftpClient) { private[this] var buffer: Seq[FtpFile] = Seq.empty[FtpFile] @@ -84,9 +84,9 @@ private[ftp] trait FtpBrowserGraphStage[FtpClient, S <: RemoteFileSettings] private[this] def getFilesFromPath(basePath: String) = if (basePath.isEmpty) - ftpLike.listFiles(handler.get) + graphStageFtpLike.listFiles(handler.get) else - ftpLike.listFiles(basePath, handler.get) + graphStageFtpLike.listFiles(basePath, handler.get) } // end of stage logic diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpDirectoryOperationsGraphStage.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpDirectoryOperationsGraphStage.scala index c24b0d066..2e80bd58a 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpDirectoryOperationsGraphStage.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpDirectoryOperationsGraphStage.scala @@ -31,7 +31,7 @@ private[ftp] trait FtpDirectoryOperationsGraphStage[FtpClient, S <: RemoteFileSe out, new OutHandler { override def onPull(): Unit = { - push(out, ftpLike.mkdir(basePath, directoryName, handler.get)) + push(out, graphStageFtpLike.mkdir(basePath, directoryName, handler.get)) complete(out) } }) diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala index 0090c3504..d248dc0bf 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpGraphStageLogic.scala @@ -29,18 +29,18 @@ import scala.util.control.NonFatal @InternalApi private[ftp] abstract class 
FtpGraphStageLogic[T, FtpClient, S <: RemoteFileSettings]( val shape: Shape, - val ftpLike: FtpLike[FtpClient, S], + val graphStageFtpLike: FtpLike[FtpClient, S], val connectionSettings: S, val ftpClient: () => FtpClient) extends GraphStageLogic(shape) { - protected[this] implicit val client = ftpClient() - protected[this] var handler: Option[ftpLike.Handler] = Option.empty[ftpLike.Handler] + protected[this] implicit val client: FtpClient = ftpClient() + protected[this] var handler: Option[graphStageFtpLike.Handler] = Option.empty[graphStageFtpLike.Handler] protected[this] var failed = false override def preStart(): Unit = { super.preStart() try { - val tryConnect = ftpLike.connect(connectionSettings) + val tryConnect = graphStageFtpLike.connect(connectionSettings) if (tryConnect.isSuccess) { handler = tryConnect.toOption } else @@ -76,7 +76,7 @@ private[ftp] abstract class FtpGraphStageLogic[T, FtpClient, S <: RemoteFileSett protected[this] def doPreStart(): Unit protected[this] def disconnect(): Unit = - handler.foreach(ftpLike.disconnect) + handler.foreach(graphStageFtpLike.disconnect) protected[this] def matSuccess(): Boolean diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala index 75414ade0..00cbc5e21 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpIOGraphStage.scala @@ -106,7 +106,7 @@ private[ftp] trait FtpIOSourceStage[FtpClient, S <: RemoteFileSettings] isOpt.foreach { os => try { os.close() - ftpLike match { + graphStageFtpLike match { case cfo: CommonFtpOperations => if (!cfo.completePendingCommand(handler.get.asInstanceOf[cfo.Handler])) throw new IOException("File transfer failed.") @@ -128,13 +128,13 @@ private[ftp] trait FtpIOSourceStage[FtpClient, S <: RemoteFileSettings] } protected[this] def doPreStart(): Unit = - isOpt = ftpLike match { + isOpt = graphStageFtpLike match { case ur: UnconfirmedReads => withUnconfirmedReads(ur) case ro: RetrieveOffset => Some(ro.retrieveFileInputStream(path, handler.get.asInstanceOf[ro.Handler], offset).get) case _ => - Some(ftpLike.retrieveFileInputStream(path, handler.get).get) + Some(graphStageFtpLike.retrieveFileInputStream(path, handler.get).get) } private def withUnconfirmedReads( @@ -229,7 +229,7 @@ private[ftp] trait FtpIOSinkStage[FtpClient, S <: RemoteFileSettings] osOpt.foreach { os => try { os.close() - ftpLike match { + graphStageFtpLike match { case cfo: CommonFtpOperations => if (!cfo.completePendingCommand(handler.get.asInstanceOf[cfo.Handler])) throw new IOException("File transfer failed.") @@ -251,7 +251,7 @@ private[ftp] trait FtpIOSinkStage[FtpClient, S <: RemoteFileSettings] } protected[this] def doPreStart(): Unit = { - osOpt = Some(ftpLike.storeFileOutputStream(path, handler.get, append).get) + osOpt = Some(graphStageFtpLike.storeFileOutputStream(path, handler.get, append).get) pull(in) } @@ -301,7 +301,7 @@ private[ftp] trait FtpMoveSink[FtpClient, S <: RemoteFileSettings] override def onPush(): Unit = { try { val sourcePath = grab(in) - ftpLike.move(sourcePath.path, destinationPath(sourcePath), handler.get) + graphStageFtpLike.move(sourcePath.path, destinationPath(sourcePath), handler.get) numberOfMovedFiles = numberOfMovedFiles + 1 pull(in) } catch { @@ -356,7 +356,7 @@ private[ftp] trait FtpRemoveSink[FtpClient, S <: RemoteFileSettings] new InHandler { override 
def onPush(): Unit = { try { - ftpLike.remove(grab(in).path, handler.get) + graphStageFtpLike.remove(grab(in).path, handler.get) numberOfRemovedFiles = numberOfRemovedFiles + 1 pull(in) } catch { diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpLike.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpLike.scala index d64b488fe..216d7a597 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpLike.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpLike.scala @@ -54,7 +54,7 @@ protected[ftp] trait FtpLike[FtpClient, S <: RemoteFileSettings] { * INTERNAL API */ @InternalApi -protected[ftp] trait RetrieveOffset { _: FtpLike[_, _] => +protected[ftp] trait RetrieveOffset { self: FtpLike[_, _] => def retrieveFileInputStream(name: String, handler: Handler, offset: Long): Try[InputStream] @@ -64,7 +64,7 @@ protected[ftp] trait RetrieveOffset { _: FtpLike[_, _] => * INTERNAL API */ @InternalApi -protected[ftp] trait UnconfirmedReads { _: FtpLike[_, _] => +protected[ftp] trait UnconfirmedReads { self: FtpLike[_, _] => def retrieveFileInputStream(name: String, handler: Handler, offset: Long, maxUnconfirmedReads: Int): Try[InputStream] @@ -76,8 +76,11 @@ protected[ftp] trait UnconfirmedReads { _: FtpLike[_, _] => @InternalApi object FtpLike { // type class instances - implicit val ftpLikeInstance = new FtpLike[FTPClient, FtpSettings] with RetrieveOffset with FtpOperations - implicit val ftpsLikeInstance = new FtpLike[FTPSClient, FtpsSettings] with RetrieveOffset with FtpsOperations - implicit val sFtpLikeInstance = + implicit val ftpLikeInstance: FtpLike[FTPClient, FtpSettings] with RetrieveOffset with FtpOperations = + new FtpLike[FTPClient, FtpSettings] with RetrieveOffset with FtpOperations + implicit val ftpsLikeInstance: FtpLike[FTPSClient, FtpsSettings] with RetrieveOffset with FtpsOperations = + new FtpLike[FTPSClient, FtpsSettings] with RetrieveOffset with FtpsOperations + implicit val sFtpLikeInstance + : FtpLike[SSHClient, SftpSettings] with RetrieveOffset with SftpOperations with UnconfirmedReads = new FtpLike[SSHClient, SftpSettings] with RetrieveOffset with SftpOperations with UnconfirmedReads } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala index e4c46e45d..de0990fd0 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpOperations.scala @@ -23,7 +23,7 @@ import scala.util.Try * INTERNAL API */ @InternalApi -private[ftp] trait FtpOperations extends CommonFtpOperations { _: FtpLike[FTPClient, FtpSettings] => +private[ftp] trait FtpOperations extends CommonFtpOperations { self: FtpLike[FTPClient, FtpSettings] => def connect(connectionSettings: FtpSettings)(implicit ftpClient: FTPClient): Try[Handler] = Try { connectionSettings.proxy.foreach(ftpClient.setProxy) diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala index 7c672f745..4fad15ca3 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala @@ -25,7 +25,7 @@ import scala.util.Try */ @InternalApi private[ftp] trait FtpsOperations extends 
CommonFtpOperations { - _: FtpLike[FTPSClient, FtpsSettings] => + self: FtpLike[FTPSClient, FtpsSettings] => def connect(connectionSettings: FtpsSettings)(implicit ftpClient: FTPSClient): Try[Handler] = Try { diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala index 59045f385..04cbd77bd 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/SftpOperations.scala @@ -37,7 +37,7 @@ import scala.util.{ Failure, Try } * INTERNAL API */ @InternalApi -private[ftp] trait SftpOperations { _: FtpLike[SSHClient, SftpSettings] => +private[ftp] trait SftpOperations { self: FtpLike[SSHClient, SftpSettings] => type Handler = SFTPClient diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala index 9d6c299b1..3644a1f4d 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/javadsl/FtpApi.scala @@ -30,7 +30,7 @@ import net.schmizz.sshj.SSHClient import org.apache.commons.net.ftp.{ FTPClient, FTPSClient } @DoNotInherit -sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { _: FtpSourceFactory[FtpClient, S] => +sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { self: FtpSourceFactory[FtpClient, S] => /** * Java API: creates a [[pekko.stream.javadsl.Source Source]] of [[FtpFile]]s from the remote user `root` directory. @@ -573,6 +573,6 @@ object Sftp extends SftpApi { */ def create(customSshClient: SSHClient): SftpApi = new SftpApi { - override val sshClient: SSHClient = customSshClient + override def sshClient(): SSHClient = customSshClient } } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/scaladsl/FtpApi.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/scaladsl/FtpApi.scala index 4bc7da6e4..0e574bffe 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/scaladsl/FtpApi.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/scaladsl/FtpApi.scala @@ -28,7 +28,7 @@ import org.apache.commons.net.ftp.{ FTPClient, FTPSClient } import scala.concurrent.Future @DoNotInherit -sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { _: FtpSourceFactory[FtpClient, S] => +sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { self: FtpSourceFactory[FtpClient, S] => /** * Scala API: creates a [[pekko.stream.scaladsl.Source Source]] of [[FtpFile]]s from the remote user `root` directory. 
@@ -398,6 +398,6 @@ object Sftp extends SftpApi { */ def apply(customSshClient: SSHClient): SftpApi = new SftpApi { - override val sshClient: SSHClient = customSshClient + override def sshClient(): SSHClient = customSshClient } } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index ca8286650..a2af1249d 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -169,7 +169,6 @@ object Dependencies { )) val Ftp = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "commons-net" % "commons-net" % "3.8.0", "com.hierynomus" % "sshj" % "0.33.0")) From 03709901ececc0df7ca772cc4f38a4e2c3dd9834 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 11 Jun 2023 16:48:34 +0100 Subject: [PATCH 29/90] more connectors that support scala3 (#172) --- project/Dependencies.scala | 3 --- 1 file changed, 3 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index a2af1249d..88c3dab13 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -110,7 +110,6 @@ object Dependencies { val CassandraDriverVersionInDocs = "4.15" val Cassandra = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("com.datastax.oss" % "java-driver-core" % CassandraDriverVersion) .exclude("com.github.spotbugs", "spotbugs-annotations") @@ -119,7 +118,6 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided)) val Couchbase = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.couchbase.client" % "java-client" % CouchbaseVersion, // ApacheV2 "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", // ApacheV2 @@ -276,7 +274,6 @@ object Dependencies { "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val HuaweiPushKit = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, From 2c00326c3d90567ff3d22c88d604cb552706b9b9 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 12 Jun 2023 13:56:25 +0100 Subject: [PATCH 30/90] support scala3 in avroparquet (#158) * support scala3 in avroparquet add some scala3 tests Update AvroParquetSinkSpec.scala Update avroparquet.md Update AbstractAvroParquet.scala Update AbstractAvroParquet.scala * refactor test code * Update AbstractAvroParquetBase.scala * fix doc links * use Common.isScala3 * Update Dependencies.scala * Update build.sbt --- .../docs/scaladsl/AbstractAvroParquet.scala | 33 +++++++++++++++ .../docs/scaladsl/AbstractAvroParquet.scala | 41 +++++++++++++++++++ ...et.scala => AbstractAvroParquetBase.scala} | 26 +++--------- .../docs/scaladsl/AvroParquetFlowSpec.scala | 10 ++--- .../docs/scaladsl/AvroParquetSinkSpec.scala | 12 +++--- .../docs/scaladsl/AvroParquetSourceSpec.scala | 8 ++-- build.sbt | 11 +---- docs/src/main/paradox/avroparquet.md | 4 +- project/Common.scala | 2 +- project/Dependencies.scala | 15 ++++--- 10 files changed, 108 insertions(+), 54 deletions(-) create mode 100644 avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala create mode 100644 avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala rename avroparquet/src/test/scala/docs/scaladsl/{AbstractAvroParquet.scala => AbstractAvroParquetBase.scala} (88%) diff --git a/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala b/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala new file mode 100644 index 000000000..3d2da02e0 --- /dev/null +++ 
b/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.scaladsl + +import com.sksamuel.avro4s.RecordFormat +import org.apache.pekko.testkit.TestKit +import org.scalatest.{ BeforeAndAfterAll, Suite } + +import java.io.File +import scala.reflect.io.Directory + +trait AbstractAvroParquet extends BeforeAndAfterAll with AbstractAvroParquetBase { + this: Suite with TestKit => + + val format: RecordFormat[Document] = RecordFormat[Document] + + override def afterAll(): Unit = { + TestKit.shutdownActorSystem(system) + val directory = new Directory(new File(folder)) + directory.deleteRecursively() + } +} diff --git a/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala b/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala new file mode 100644 index 000000000..bd91822d6 --- /dev/null +++ b/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.scaladsl + +import com.sksamuel.avro4s._ +import org.apache.pekko.testkit.TestKit +import org.scalatest.{ BeforeAndAfterAll, Suite } + +import java.io.File + +trait AbstractAvroParquet extends BeforeAndAfterAll with AbstractAvroParquetBase { + this: Suite with TestKit => + + implicit val toRecordDocument: ToRecord[Document] = ToRecord[Document](schema) + implicit val fromRecordDocument: FromRecord[Document] = FromRecord[Document](schema) + val format: RecordFormat[Document] = RecordFormat[Document](schema) + + override def afterAll(): Unit = { + TestKit.shutdownActorSystem(system) + deleteRecursively(new File(folder)) + } + + private def deleteRecursively(f: File): Boolean = { + if (f.isDirectory) f.listFiles match { + case null => + case xs => xs.foreach(deleteRecursively) + } + f.delete() + } +} diff --git a/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala b/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala similarity index 88% rename from avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala rename to avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala index 1369544d0..97a4052f8 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala @@ -13,26 +13,18 @@ package docs.scaladsl -import java.io.File - -import org.apache.pekko.testkit.TestKit -import com.sksamuel.avro4s.RecordFormat import org.apache.avro.Schema import org.apache.avro.generic.{ GenericRecord, GenericRecordBuilder } import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.parquet.avro.{ AvroParquetReader, AvroParquetWriter, AvroReadSupport } -import org.apache.parquet.hadoop.{ ParquetReader, ParquetWriter } import org.apache.parquet.hadoop.util.HadoopInputFile +import org.apache.parquet.hadoop.{ 
ParquetReader, ParquetWriter } import org.scalacheck.Gen -import org.scalatest.{ BeforeAndAfterAll, Suite } -import scala.reflect.io.Directory import scala.util.Random -trait AbstractAvroParquet extends BeforeAndAfterAll { - this: Suite with TestKit => - +trait AbstractAvroParquetBase { case class Document(id: String, body: String) val schema: Schema = new Schema.Parser().parse( @@ -42,13 +34,13 @@ trait AbstractAvroParquet extends BeforeAndAfterAll { Gen.oneOf(Seq(Document(id = Gen.alphaStr.sample.get, body = Gen.alphaLowerStr.sample.get))) val genDocuments: Int => Gen[List[Document]] = n => Gen.listOfN(n, genDocument) - val format: RecordFormat[Document] = RecordFormat[Document] - val folder: String = "./" + Random.alphanumeric.take(8).mkString("") val genFinalFile: Gen[String] = for { fileName <- Gen.alphaLowerStr - } yield { folder + "/" + fileName + ".parquet" } + } yield { + folder + "/" + fileName + ".parquet" + } val genFile: Gen[String] = Gen.oneOf(Seq(Gen.alphaLowerStr.sample.get + ".parquet")) @@ -110,8 +102,8 @@ trait AbstractAvroParquet extends BeforeAndAfterAll { import org.apache.avro.generic.GenericRecord import org.apache.hadoop.fs.Path import org.apache.parquet.avro.AvroParquetReader - import org.apache.parquet.hadoop.util.HadoopInputFile import org.apache.parquet.hadoop.ParquetReader + import org.apache.parquet.hadoop.util.HadoopInputFile val file: String = "./sample/path/test.parquet" val writer: ParquetWriter[GenericRecord] = @@ -124,10 +116,4 @@ trait AbstractAvroParquet extends BeforeAndAfterAll { if (writer != null && reader != null) { // forces val usage } } - - override def afterAll(): Unit = { - TestKit.shutdownActorSystem(system) - val directory = new Directory(new File(folder)) - directory.deleteRecursively() - } } diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala index 626b15400..c52de6acc 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala @@ -13,6 +13,9 @@ package docs.scaladsl +import com.sksamuel.avro4s.Record +import org.apache.avro.generic.GenericRecord +import org.apache.parquet.hadoop.ParquetWriter import org.apache.pekko import pekko.NotUsed import pekko.actor.ActorSystem @@ -20,13 +23,10 @@ import pekko.stream.connectors.avroparquet.scaladsl.AvroParquetFlow import pekko.stream.scaladsl.{ Flow, Sink, Source } import pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped import pekko.testkit.TestKit -import com.sksamuel.avro4s.Record -import org.apache.avro.generic.GenericRecord -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpecLike -import org.apache.parquet.hadoop.ParquetWriter import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike class AvroParquetFlowSpec extends TestKit(ActorSystem("FlowSpec")) diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala index b92170a37..18b414e94 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala @@ -13,18 +13,18 @@ package docs.scaladsl +import com.sksamuel.avro4s.Record +import org.apache.avro.generic.GenericRecord +import 
org.apache.parquet.hadoop.ParquetWriter import org.apache.pekko -import pekko.{ Done, NotUsed } import pekko.actor.ActorSystem import pekko.stream.connectors.avroparquet.scaladsl.AvroParquetSink import pekko.stream.scaladsl.Source import pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped import pekko.testkit.TestKit -import com.sksamuel.avro4s.{ Record, RecordFormat } -import org.scalatest.concurrent.ScalaFutures -import org.apache.avro.generic.GenericRecord -import org.apache.parquet.hadoop.ParquetWriter +import pekko.{ Done, NotUsed } import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike @@ -63,7 +63,7 @@ class AvroParquetSinkSpec val documents: List[Document] = genDocuments(n).sample.get val writer: ParquetWriter[Record] = parquetWriter[Record](file, conf, schema) // #init-sink - val records: List[Record] = documents.map(RecordFormat[Document].to(_)) + val records: List[Record] = documents.map(format.to(_)) val source: Source[Record, NotUsed] = Source(records) val result: Future[Done] = source .runWith(AvroParquetSink(writer)) diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala index ea2300d6b..2fa88db1e 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala @@ -13,6 +13,9 @@ package docs.scaladsl +import com.sksamuel.avro4s.Record +import org.apache.avro.generic.GenericRecord +import org.apache.parquet.hadoop.ParquetReader import org.apache.pekko import pekko.NotUsed import pekko.actor.ActorSystem @@ -21,11 +24,8 @@ import pekko.stream.scaladsl.{ Keep, Source } import pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped import pekko.stream.testkit.scaladsl.TestSink import pekko.testkit.TestKit -import com.sksamuel.avro4s.Record -import org.scalatest.concurrent.ScalaFutures -import org.apache.avro.generic.GenericRecord -import org.apache.parquet.hadoop.ParquetReader import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike diff --git a/build.sbt b/build.sbt index deeaf3022..a6b815c93 100644 --- a/build.sbt +++ b/build.sbt @@ -277,16 +277,7 @@ lazy val ironmq = pekkoConnectorProject( lazy val jms = pekkoConnectorProject("jms", "jms", Dependencies.Jms) -val scalaReleaseSeparateSource: Def.SettingsDefinition = Compile / unmanagedSourceDirectories ++= { - if (scalaVersion.value.startsWith("2")) { - Seq((LocalRootProject / baseDirectory).value / "src" / "main" / "scala-2") - } else { - Seq((LocalRootProject / baseDirectory).value / "src" / "main" / "scala-3") - } -} - -lazy val jsonStreaming = pekkoConnectorProject("json-streaming", "json.streaming", - Dependencies.JsonStreaming ++ scalaReleaseSeparateSource) +lazy val jsonStreaming = pekkoConnectorProject("json-streaming", "json.streaming", Dependencies.JsonStreaming) lazy val kinesis = pekkoConnectorProject("kinesis", "aws.kinesis", Dependencies.Kinesis) diff --git a/docs/src/main/paradox/avroparquet.md b/docs/src/main/paradox/avroparquet.md index 270aad232..752eca4ee 100644 --- a/docs/src/main/paradox/avroparquet.md +++ b/docs/src/main/paradox/avroparquet.md @@ -29,7 +29,7 @@ Sometimes it might be useful to use a Parquet file as stream Source. 
For this we instance which will produce records as subtypes of `GenericRecord`, the Avro record's abstract representation. Scala -: @@snip (/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala) { #prepare-source #init-reader } +: @@snip (/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala) { #prepare-source #init-reader } Java : @@snip (/avroparquet/src/test/java/docs/javadsl/Examples.java) { #init-reader } @@ -49,7 +49,7 @@ On the other hand, you can use `AvroParquetWriter` as the Apache Pekko Streams S In that case, its initialisation would require an instance of `org.apache.parquet.hadoop.ParquetWriter`. It will also expect any subtype of `GenericRecord` to be passed. Scala -: @@snip (/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala) { #prepare-sink } +: @@snip (/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala) { #prepare-sink } Java : @@snip (/avroparquet/src/test/java/docs/javadsl/AvroParquetSinkTest.java) { #init-writer } diff --git a/project/Common.scala b/project/Common.scala index 2f020dff0..adaa1f49b 100644 --- a/project/Common.scala +++ b/project/Common.scala @@ -51,7 +51,7 @@ object Common extends AutoPlugin { "com.google.api:com.google.cloud:com.google.iam:com.google.logging:" + "com.google.longrunning:com.google.protobuf:com.google.rpc:com.google.type" - override lazy val projectSettings = Dependencies.Common ++ Seq( + override lazy val projectSettings = Dependencies.CommonSettings ++ Seq( projectInfoVersion := (if (isSnapshot.value) "snapshot" else version.value), crossVersion := CrossVersion.binary, crossScalaVersions := Dependencies.ScalaVersions, diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 88c3dab13..0708a1c6f 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -56,7 +56,7 @@ object Dependencies { val log4jOverSlf4jVersion = "1.7.36" val jclOverSlf4jVersion = "1.7.36" - val Common = Seq( + val CommonSettings = Seq( // These libraries are added to all modules via the `Common` AutoPlugin libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-stream" % PekkoVersion)) @@ -154,13 +154,16 @@ object Dependencies { libraryDependencies ++= Seq( "com.google.jimfs" % "jimfs" % "1.2" % Test)) + val avro4sVersion: Def.Initialize[String] = Def.setting { + if (Common.isScala3.value) "5.0.4" else "4.1.1" + } + val AvroParquet = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( - "org.apache.parquet" % "parquet-avro" % "1.10.1", - ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), - ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), - "com.sksamuel.avro4s" %% "avro4s-core" % "4.1.1" % Test, + "org.apache.parquet" % "parquet-avro" % "1.10.1", // Apache2 + ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 + ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 + "com.sksamuel.avro4s" %% "avro4s-core" % avro4sVersion.value % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, "org.specs2" %% "specs2-core" % "4.20.0" % Test, // MIT like: https://github.com/etorreborre/specs2/blob/master/LICENSE.txt "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html From 9f13ddb0675c0c8a44ee239c4fe77519b654cb44 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 12 Jun 2023 18:58:27 +0100 Subject: [PATCH 31/90] suggested change to queue 
method call (#174) --- .../connectors/json/impl/QueueHelper.scala | 19 ----------------- .../connectors/json/impl/QueueHelper.scala | 21 ------------------- .../json/impl/JsonStreamReader.scala | 2 +- 3 files changed, 1 insertion(+), 41 deletions(-) delete mode 100644 json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala delete mode 100644 json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala diff --git a/json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala b/json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala deleted file mode 100644 index 5972a1eca..000000000 --- a/json-streaming/src/main/scala-2/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * license agreements; and to You under the Apache License, version 2.0: - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * This file is part of the Apache Pekko project, derived from Akka. - */ - -package org.apache.pekko.stream.connectors.json.impl - -import org.apache.pekko.util.ByteString - -import scala.collection.immutable.Queue - -private[impl] object QueueHelper { - @inline final def enqueue(queue: Queue[ByteString], byteString: ByteString): Queue[ByteString] = - queue.enqueue(byteString) -} diff --git a/json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala b/json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala deleted file mode 100644 index 27b1f7f5e..000000000 --- a/json-streaming/src/main/scala-3/org/apache/pekko/stream/connectors/json/impl/QueueHelper.scala +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * license agreements; and to You under the Apache License, version 2.0: - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * This file is part of the Apache Pekko project, derived from Akka. 
- */ - -package org.apache.pekko.stream.connectors.json.impl - -import org.apache.pekko.util.ByteString - -import scala.collection.immutable.Queue - -private[impl] object QueueHelper { - inline final def enqueue(queue: Queue[ByteString], byteString: ByteString): Queue[ByteString] = { - // see https://github.com/lampepfl/dotty/issues/17946 - queue.enqueueAll(Iterable.single(byteString)) - } -} diff --git a/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala b/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala index 990b68729..8f50d51d2 100644 --- a/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala +++ b/json-streaming/src/main/scala/org/apache/pekko/stream/connectors/json/impl/JsonStreamReader.scala @@ -50,7 +50,7 @@ private[pekko] final class JsonStreamReader(path: JsonPath) extends GraphStage[F new JsonPathListener { override def onValue(value: Any, context: ParsingContext): Unit = { // see https://github.com/lampepfl/dotty/issues/17946 - buffer = QueueHelper.enqueue(buffer, ByteString(value.toString)) + buffer = buffer.enqueue[ByteString](ByteString(value.toString)) } }) .build From 565651a29c958170cbe134b59a017b4eae6e9b08 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 26 Jun 2023 19:19:28 +0100 Subject: [PATCH 32/90] use LEGAL-626 apache header (#182) --- .../src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala | 2 +- .../src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala | 2 +- .../pekko/stream/connectors/google/jwt/JwtSprayJson.scala | 2 +- .../pekko/stream/connectors/jms/impl/GraphStageCompanion.scala | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala b/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala index 3d2da02e0..87fc383a8 100644 --- a/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala +++ b/avroparquet/src/test/scala-2/docs/scaladsl/AbstractAvroParquet.scala @@ -4,7 +4,7 @@ * * https://www.apache.org/licenses/LICENSE-2.0 * - * This file is part of the Apache Pekko project, derived from Akka. + * This file is part of the Apache Pekko project, which was derived from Akka. */ /* diff --git a/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala b/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala index bd91822d6..f3358adb1 100644 --- a/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala +++ b/avroparquet/src/test/scala-3/docs/scaladsl/AbstractAvroParquet.scala @@ -4,7 +4,7 @@ * * https://www.apache.org/licenses/LICENSE-2.0 * - * This file is part of the Apache Pekko project, derived from Akka. + * This file is part of the Apache Pekko project, which was derived from Akka. */ /* diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala index 8362fa88e..854d84f81 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala @@ -4,7 +4,7 @@ * * https://www.apache.org/licenses/LICENSE-2.0 * - * This file is part of the Apache Pekko project, derived from Akka. + * This file is part of the Apache Pekko project, which was derived from Akka. 
*/ package org.apache.pekko.stream.connectors.google.jwt diff --git a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala index e43a4d242..554d5d8aa 100644 --- a/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala +++ b/jms/src/main/scala/org/apache/pekko/stream/connectors/jms/impl/GraphStageCompanion.scala @@ -4,7 +4,7 @@ * * https://www.apache.org/licenses/LICENSE-2.0 * - * This file is part of the Apache Pekko project, derived from Akka. + * This file is part of the Apache Pekko project, which was derived from Akka. */ package org.apache.pekko.stream.connectors.jms.impl From d9f4eed4483259a37b57d3bba210f1ff6c4f3c60 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 26 Jun 2023 19:20:36 +0100 Subject: [PATCH 33/90] acknowledge use of jwt-scala code (#181) --- LICENSE | 7 + build.sbt | 1 + .../connectors/google/jwt/JwtSprayJson.scala | 3 + legal/GoogleCommonLicense.txt | 208 ++++++++++++++++++ project/MetaInfLicenseNoticeCopy.scala | 3 + 5 files changed, 222 insertions(+) create mode 100644 legal/GoogleCommonLicense.txt diff --git a/LICENSE b/LICENSE index dff33d762..e8e5b8a10 100644 --- a/LICENSE +++ b/LICENSE @@ -232,3 +232,10 @@ This code was released under an Apache 2.0 license. AWS SDK for Java Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +--------------- + +pekko-connectors-google-common contains `org.apache.pekko.stream.connectors.google.jwt.JwtSprayJson.scala` +which is copied from jwt-scala . +The original code was released under the Apache 2.0 license. +Copyright 2021 JWT-Scala Contributors. diff --git a/build.sbt b/build.sbt index a6b815c93..71a978158 100644 --- a/build.sbt +++ b/build.sbt @@ -194,6 +194,7 @@ lazy val googleCommon = pekkoConnectorProject( "google-common", "google.common", Dependencies.GoogleCommon, + MetaInfLicenseNoticeCopy.googleCommonSettings, Test / fork := true) lazy val googleCloudBigQuery = pekkoConnectorProject( diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala index 854d84f81..d10fff1ca 100644 --- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala +++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/jwt/JwtSprayJson.scala @@ -18,6 +18,9 @@ import spray.json._ /** * Implementation of `JwtCore` using `JsObject` from spray-json. + * + * This is a copy of Apache licensed (version 2.0) code from + * https://github.com/jwt-scala/jwt-scala/blob/224f16124ea49a1cc5144a647e3767de4267ee7c/json/spray-json/src/main/scala/JwtSprayJson.scala */ @InternalApi private[google] trait JwtSprayJsonParser[H, C] extends JwtJsonCommon[JsObject, H, C] { diff --git a/legal/GoogleCommonLicense.txt b/legal/GoogleCommonLicense.txt new file mode 100644 index 000000000..1a6c25202 --- /dev/null +++ b/legal/GoogleCommonLicense.txt @@ -0,0 +1,208 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +--------------- + +pekko-connectors-google-common contains `org.apache.pekko.stream.connectors.google.jwt.JwtSprayJson.scala` +which is copied from jwt-scala . +The original code was released under the Apache 2.0 license. +Copyright 2021 JWT-Scala Contributors. diff --git a/project/MetaInfLicenseNoticeCopy.scala b/project/MetaInfLicenseNoticeCopy.scala index f2f7d7138..dafe920d8 100644 --- a/project/MetaInfLicenseNoticeCopy.scala +++ b/project/MetaInfLicenseNoticeCopy.scala @@ -39,6 +39,9 @@ object MetaInfLicenseNoticeCopy extends AutoPlugin { apacheSonatypeLicenseFile := baseDir.value / "legal" / "S3License.txt", apacheSonatypeNoticeFile := baseDir.value / "legal" / "S3Notice.txt") + lazy val googleCommonSettings = Seq( + apacheSonatypeLicenseFile := baseDir.value / "legal" / "GoogleCommonLicense.txt") + override def trigger = allRequirements override def requires = ApacheSonatypePlugin From 26329595a145266416d02cdec598ff96592d4fe0 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 27 Jun 2023 11:12:45 +0100 Subject: [PATCH 34/90] unused import --- .../connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala index c2737e94c..51b846ef3 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/impl/UnixDomainSocketImpl.scala @@ -17,7 +17,7 @@ package impl import org.apache.pekko import pekko.actor.{ Cancellable, CoordinatedShutdown, ExtendedActorSystem, Extension } import pekko.annotation.InternalApi -import pekko.event.{ LogSource, Logging, LoggingAdapter } +import pekko.event.{ Logging, LoggingAdapter } import pekko.stream._ import pekko.stream.connectors.unixdomainsocket.scaladsl.UnixDomainSocket.{ IncomingConnection, From 8c5d65051cfeb3df4a0eb87039c33131b6b21ccc Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Fri, 18 Aug 2023 20:06:49 +0100 Subject: [PATCH 35/90] add scala3 support for s3 (#167) * add futiles license * some more compile issues * more compile issues * Update S3.scala * try .toInt * remove ASF header from futiles code * Final touches to get S3 working for Scala 3 --------- Co-authored-by: Matthew de Detrich --- LICENSE | 15 +++- legal/S3License.txt | 7 ++ project/Dependencies.scala | 4 +- .../stream/connectors/s3/impl/S3Stream.scala | 33 ++++--- .../s3/impl/auth/CanonicalRequest.scala | 4 +- .../stream/connectors/s3/javadsl/S3.scala | 1 + .../stream/connectors/s3/TestUtils.scala | 6 +- .../s3/impl/SplitAfterSizeSpec.scala | 3 +- .../s3/impl/auth/CanonicalRequestSpec.scala | 2 +- .../auth/SplitAfterSizeWithContextSpec.scala | 3 +- .../connectors/s3/impl/retry/Retry.scala | 86 +++++++++++++++++++ .../connectors/s3/impl/retry/Timeouts.scala | 45 ++++++++++ .../s3/scaladsl/S3IntegrationSpec.scala | 6 +- .../s3/scaladsl/S3WireMockBase.scala | 3 +- 14 files changed, 188 insertions(+), 30 deletions(-) create mode 100644 s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Retry.scala create mode 100644 s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Timeouts.scala diff --git a/LICENSE b/LICENSE index e8e5b8a10..0889548ba 100644 --- a/LICENSE +++ b/LICENSE @@ -202,7 +202,14 @@ 
--------------- -pekko-mqtt-streaming contains code from paho.mqtt.java . +pekko-connectors-google-common contains `org.apache.pekko.stream.connectors.google.jwt.JwtSprayJson.scala` +which is copied from jwt-scala . +The original code was released under the Apache 2.0 license. +Copyright 2021 JWT-Scala Contributors. + +--------------- + +pekko-connectors-mqtt-streaming contains code from paho.mqtt.java . This code was released under a dual license: Eclipse Public License version 2.0 and Eclipse Distribution License. We choose to use the code under the Eclipse Distribution License. @@ -235,7 +242,7 @@ Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. --------------- -pekko-connectors-google-common contains `org.apache.pekko.stream.connectors.google.jwt.JwtSprayJson.scala` -which is copied from jwt-scala . +pekko-connectors-s3 contains test code in `org.apache.pekko.stream.connectors.s3.impl.retry` package +which is copied from futiles . The original code was released under the Apache 2.0 license. -Copyright 2021 JWT-Scala Contributors. +Copyright 2015 Johan Andrén. diff --git a/legal/S3License.txt b/legal/S3License.txt index e64484eab..5ec7aa63f 100644 --- a/legal/S3License.txt +++ b/legal/S3License.txt @@ -209,3 +209,10 @@ This code was released under an Apache 2.0 license. AWS SDK for Java Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +--------------- + +pekko-connectors-s3 contains test code in `org.apache.pekko.stream.connectors.s3.impl.retry` package +which is copied from futiles . +The original code was released under the Apache 2.0 license. +Copyright 2015 Johan Andrén. diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 0708a1c6f..2effbd4fc 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -364,7 +364,6 @@ object Dependencies { )) val S3 = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-xml" % PekkoHttpVersion, @@ -373,8 +372,7 @@ object Dependencies { "com.google.jimfs" % "jimfs" % "1.2" % Test, "com.github.tomakehurst" % "wiremock-jre8" % "2.32.0" % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, - "org.scalatestplus" %% scalaTestScalaCheckArtifact % scalaTestScalaCheckVersion % Test, - "com.markatta" %% "futiles" % "2.0.2" % Test)) + "org.scalatestplus" %% scalaTestScalaCheckArtifact % scalaTestScalaCheckVersion % Test)) val SpringWeb = { val SpringVersion = "5.1.17.RELEASE" diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala index 3ba8f8cf2..78c47caa6 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala @@ -29,7 +29,7 @@ import pekko.http.scaladsl.{ ClientTransport, Http } import pekko.stream.connectors.s3.BucketAccess.{ AccessDenied, AccessGranted, NotExists } import pekko.stream.connectors.s3._ import pekko.stream.connectors.s3.impl.auth.{ CredentialScope, Signer, SigningKey } -import pekko.stream.scaladsl.{ Flow, Keep, RetryFlow, RunnableGraph, Sink, Source, Tcp } +import pekko.stream.scaladsl.{ Flow, Keep, RetryFlow, RunnableGraph, Sink, Source, SubFlow, Tcp } import pekko.stream.{ Attributes, Materializer } import pekko.util.ByteString import pekko.{ Done, NotUsed } @@ -1177,11 +1177,15 @@ import scala.util.{ 
Failure, Success, Try } import conf.multipartUploadSettings.retrySettings._ - SplitAfterSize(chunkSize, chunkBufferSize)(atLeastOneByteString) - .via(getChunkBuffer(chunkSize, chunkBufferSize, maxRetries)) // creates the chunks - .mergeSubstreamsWithParallelism(parallelism) + val source1: SubFlow[Chunk, NotUsed, Flow[ByteString, ByteString, NotUsed]#Repr, Sink[ByteString, NotUsed]] = + SplitAfterSize(chunkSize, chunkBufferSize)(atLeastOneByteString) + .via(getChunkBuffer(chunkSize, chunkBufferSize, maxRetries)) // creates the chunks + + val source2 = source1.mergeSubstreamsWithParallelism(parallelism) .filter(_.size > 0) .via(atLeastOne) + + source2 .zip(requestInfoOrUploadState(s3Location, contentType, s3Headers, initialUploadState)) .groupBy(parallelism, { case (_, (_, chunkIndex)) => chunkIndex % parallelism }) // Allow requests that fail with transient errors to be retried, using the already buffered chunk. @@ -1278,11 +1282,18 @@ import scala.util.{ Failure, Success, Try } Flow[(ByteString, C)].orElse( Source.single((ByteString.empty, null.asInstanceOf[C]))) - SplitAfterSizeWithContext(chunkSize)(atLeastOneByteStringAndEmptyContext) - .via(getChunk(chunkBufferSize)) - .mergeSubstreamsWithParallelism(parallelism) - .filter { case (chunk, _) => chunk.size > 0 } - .via(atLeastOne) + val source1: SubFlow[(Chunk, immutable.Iterable[C]), NotUsed, Flow[(ByteString, C), (ByteString, C), + NotUsed]#Repr, Sink[(ByteString, C), NotUsed]] = + SplitAfterSizeWithContext(chunkSize)(atLeastOneByteStringAndEmptyContext) + .via(getChunk(chunkBufferSize)) + + val source2: Flow[(ByteString, C), (Chunk, immutable.Iterable[C]), NotUsed] = + source1 + .mergeSubstreamsWithParallelism(parallelism) + .filter { case (chunk, _) => chunk.size > 0 } + .via(atLeastOne) + + source2 .zip(requestInfoOrUploadState(s3Location, contentType, s3Headers, initialUploadState)) .groupBy(parallelism, { case (_, (_, chunkIndex)) => chunkIndex % parallelism }) .map { @@ -1379,9 +1390,9 @@ import scala.util.{ Failure, Success, Try } import mat.executionContext Sink .seq[UploadPartResponse] - .mapMaterializedValue { responseFuture: Future[immutable.Seq[UploadPartResponse]] => + .mapMaterializedValue { (responseFuture: Future[immutable.Seq[UploadPartResponse]]) => responseFuture - .flatMap { responses: immutable.Seq[UploadPartResponse] => + .flatMap { (responses: immutable.Seq[UploadPartResponse]) => val successes = responses.collect { case r: SuccessfulUploadPart => r } val failures = responses.collect { case r: FailedUploadPart => r } if (responses.isEmpty) { diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala index 87617f42d..f8270026f 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequest.scala @@ -69,7 +69,7 @@ import pekko.http.scaladsl.model.{ HttpHeader, HttpRequest } def canonicalQueryString(query: Query): String = { def uriEncode(s: String): String = s.flatMap { case c if isUnreservedCharacter(c) => c.toString - case c => "%" + c.toHexString.toUpperCase + case c => "%" + Integer.toHexString(c).toUpperCase } query @@ -99,7 +99,7 @@ import pekko.http.scaladsl.model.{ HttpHeader, HttpRequest } if (path.isEmpty) "/" else { path.toString.flatMap { - case c if isReservedCharacter(c) => "%" + c.toHexString.toUpperCase + case c if isReservedCharacter(c) => 
"%" + Integer.toHexString(c).toUpperCase case c => c.toString } } diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala index b7392a073..41078d8f0 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala @@ -29,6 +29,7 @@ import pekko.stream.connectors.s3.headers.{ CannedAcl, ServerSideEncryption } import pekko.stream.connectors.s3._ import pekko.stream.connectors.s3.impl._ import pekko.stream.javadsl.{ RunnableGraph, Sink, Source } +import pekko.stream.scaladsl.SourceToCompletionStage import pekko.util.ccompat.JavaConverters._ import pekko.util.ByteString import pekko.util.OptionConverters._ diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/TestUtils.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/TestUtils.scala index 85fb79a05..a16a28cff 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/TestUtils.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/TestUtils.scala @@ -17,11 +17,11 @@ package org.apache.pekko.stream.connectors.s3 -import markatta.futiles.Retry import org.apache.commons.lang3.StringUtils import org.apache.pekko -import org.apache.pekko.stream.connectors.s3.scaladsl.S3 -import org.apache.pekko.stream.scaladsl.Sink +import pekko.stream.connectors.s3.impl.retry.Retry +import pekko.stream.connectors.s3.scaladsl.S3 +import pekko.stream.scaladsl.Sink import org.scalacheck.Gen import pekko.actor.ActorSystem import pekko.stream.Attributes diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala index 5733b28f9..2871e6537 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/SplitAfterSizeSpec.scala @@ -121,6 +121,7 @@ class SplitAfterSizeSpec(_system: ActorSystem) Seq(ByteString(16), ByteString(17, 18)))) } - def bytes(start: Byte, end: Byte): Array[Byte] = (start to end).map(_.toByte).toArray[Byte] + // https://github.com/lampepfl/dotty/issues/18068 + def bytes(start: Byte, end: Byte): Array[Byte] = (start.toInt to end).map(_.toByte).toArray[Byte] } diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequestSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequestSpec.scala index 9658056da..f17877cd0 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequestSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/CanonicalRequestSpec.scala @@ -128,7 +128,7 @@ class CanonicalRequestSpec extends AnyFlatSpec with Matchers { val reservedCharacters = ":?#[]@!$&'()*+,;=" reservedCharacters.foreach { char => withClue(s"failed for path containing reserved character [$char]:") { - val expectedCharEncoding = "%" + char.toHexString.toUpperCase + val expectedCharEncoding = "%" + Integer.toHexString(char).toUpperCase val request = HttpRequest( diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SplitAfterSizeWithContextSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SplitAfterSizeWithContextSpec.scala index 3f18c84e1..740062365 100644 --- 
a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SplitAfterSizeWithContextSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SplitAfterSizeWithContextSpec.scala @@ -79,6 +79,7 @@ class SplitAfterSizeWithContextSpec(_system: ActorSystem) Seq((ByteString(17, 18), 2)))) } - def bytes(start: Byte, end: Byte): Array[Byte] = (start to end).map(_.toByte).toArray[Byte] + // https://github.com/lampepfl/dotty/issues/18068 + def bytes(start: Byte, end: Byte): Array[Byte] = (start.toInt to end).map(_.toByte).toArray[Byte] } diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Retry.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Retry.scala new file mode 100644 index 000000000..bed649e60 --- /dev/null +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Retry.scala @@ -0,0 +1,86 @@ +/* + * Copyright 2015 Johan Andrén + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.pekko.stream.connectors.s3.impl.retry + +import java.util.concurrent.{ ThreadLocalRandom, TimeUnit } +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ ExecutionContext, Future } +import scala.util.Random + +// copied from https://github.com/johanandren/futiles/blob/18868f252bbf5dd71d2cd0fc67e7eb39863b686a/src/main/scala/markatta/futiles/Retry.scala +object Retry { + + private val alwaysRetry: Throwable => Boolean = _ => true + + /** + * Evaluate a block that creates a future up to a specific number of times, if the future fails, decide about + * retrying using a predicate, if it should retry an exponential back off is applied so that the retry waits longer + * and longer for every retry it makes. A jitter is also added so that the exact timing of the retry isn't exactly + * the same for all calls with the same backOffUnit + * + * Any exception in the block creating the future will also be returned as a failed future Default is to retry for + * all throwables. 
+ * + * Based on this wikipedia article: http://en.wikipedia.org/wiki/Truncated_binary_exponential_backoff + */ + def retryWithBackOff[A]( + times: Int, + backOffUnit: FiniteDuration, + shouldRetry: Throwable => Boolean = alwaysRetry)(fBlock: => Future[A])(implicit ec: ExecutionContext): Future[A] = + try + if (times <= 1) fBlock + else retryWithBackOffLoop(times, 1, backOffUnit, shouldRetry)(fBlock) + catch { + // failure to actually create the future + case x: Throwable => Future.failed(x) + } + + private def retryWithBackOffLoop[A]( + totalTimes: Int, + timesTried: Int, + backOffUnit: FiniteDuration, + shouldRetry: Throwable => Boolean)(fBlock: => Future[A])(implicit ec: ExecutionContext): Future[A] = + if (totalTimes <= timesTried) fBlock + else + fBlock.recoverWith { + case ex: Throwable if shouldRetry(ex) => + val timesTriedNow = timesTried + 1 + val backOff = nextBackOff(timesTriedNow, backOffUnit) + Timeouts + .timeout(backOff)(()) + .flatMap(_ => + retryWithBackOffLoop( + totalTimes, + timesTriedNow, + backOffUnit, + shouldRetry)(fBlock)) + } + + private def nextBackOff( + tries: Int, + backOffUnit: FiniteDuration): FiniteDuration = { + require(tries > 0, "tries should start from 1") + val rng = new Random(ThreadLocalRandom.current()) + // jitter between 0.5 and 1.5 + val jitter = 0.5 + rng.nextDouble() + val factor = math.pow(2, tries) * jitter + FiniteDuration( + (backOffUnit.toMillis * factor).toLong, + TimeUnit.MILLISECONDS) + } + +} diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Timeouts.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Timeouts.scala new file mode 100644 index 000000000..f8be639d4 --- /dev/null +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/retry/Timeouts.scala @@ -0,0 +1,45 @@ +/* + * Copyright 2015 Johan Andrén + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.pekko.stream.connectors.s3.impl.retry + +import java.util.{ Timer, TimerTask } +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ ExecutionContext, Future, Promise } +import scala.util.Try + +// copied from https://github.com/johanandren/futiles/blob/18868f252bbf5dd71d2cd0fc67e7eb39863b686a/src/main/scala/markatta/futiles/Timeouts.scala +object Timeouts { + + private val timer = new Timer() + + /** + * When ```waitFor``` has passed, evaluate ```what``` on the given execution context and complete the future + */ + def timeout[A](waitFor: FiniteDuration)(what: => A)(implicit ec: ExecutionContext): Future[A] = { + val promise = Promise[A]() + timer.schedule(new TimerTask { + override def run(): Unit = + // make sure we do not block the timer thread + Future { + promise.complete(Try(what)) + } + }, + waitFor.toMillis) + + promise.future + } +} diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala index 9989117c9..e13410191 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3IntegrationSpec.scala @@ -1293,8 +1293,8 @@ class AWSS3IntegrationSpec extends TestKit(ActorSystem("AWSS3IntegrationSpec")) }.orElse(Some(1.minute)) // Since S3 accounts share global state, we should randomly generate bucket names so concurrent tests - // against an S3 account don't conflict with eachother - override lazy val randomlyGenerateBucketNames: Boolean = + // against an S3 account don't conflict with each other + override val randomlyGenerateBucketNames: Boolean = sys.props.get("pekko.stream.connectors.s3.scaladsl.AWSS3IntegrationSpec.randomlyGenerateBucketNames") .map(_.toBoolean).getOrElse(true) } @@ -1313,7 +1313,7 @@ class MinioS3IntegrationSpec // Since a unique new Minio container is started with each test run there is no point in making random // bucket names - override lazy val randomlyGenerateBucketNames: Boolean = false + override val randomlyGenerateBucketNames: Boolean = false override lazy val defaultS3Settings: S3Settings = s3Settings .withS3RegionProvider( diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3WireMockBase.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3WireMockBase.scala index 522ed3a96..7a6bb91f1 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3WireMockBase.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3WireMockBase.scala @@ -33,7 +33,8 @@ import software.amazon.awssdk.regions.Region abstract class S3WireMockBase(_system: ActorSystem, val _wireMockServer: WireMockServer) extends TestKit(_system) { private def this(mock: WireMockServer) = - this(ActorSystem(getCallerName(getClass), config(mock.port()).withFallback(ConfigFactory.load())), mock) + this(ActorSystem(getCallerName(classOf[S3WireMockBase]), config(mock.port()).withFallback(ConfigFactory.load())), + mock) def this() = { this(initServer()) From 784827a8a4f95c40ed009d67a52b056f54b38564 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sat, 19 Aug 2023 10:12:47 +0200 Subject: [PATCH 36/90] Add scala3 support to Azure Storage Queue --- project/Dependencies.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 2effbd4fc..140406c08 100644 
--- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -101,7 +101,6 @@ object Dependencies { ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) val AzureStorageQueue = Seq( - crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.microsoft.azure" % "azure-storage" % "8.0.0")) From c12dde2b4b002ca2667a1e2cb450408468dabeca Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 20 Aug 2023 19:52:49 +0100 Subject: [PATCH 37/90] grpc 1.0.0 release (#231) * grpc 1.0.0 release * removing the snapshot resolver breaks because our paradox plugin needs it --- build.sbt | 1 - project/plugins.sbt | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/build.sbt b/build.sbt index 71a978158..15e782c70 100644 --- a/build.sbt +++ b/build.sbt @@ -11,7 +11,6 @@ import net.bzzt.reproduciblebuilds.ReproducibleBuildsPlugin.reproducibleBuildsCh // TODO: Remove when Pekko has a proper release ThisBuild / resolvers += Resolver.ApacheMavenSnapshotsRepo -ThisBuild / resolvers ++= Resolver.sonatypeOssRepos("snapshots") ThisBuild / updateOptions := updateOptions.value.withLatestSnapshots(false) ThisBuild / apacheSonatypeProjectProfile := "pekko" diff --git a/project/plugins.sbt b/project/plugins.sbt index bff2d3ae3..fe2565adf 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -37,6 +37,6 @@ addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1") // Pekko gRPC -- sync with version in Dependencies.scala:29 -addSbtPlugin("org.apache.pekko" % "pekko-grpc-sbt-plugin" % "1.0.0-RC1-3-ae23c14d-SNAPSHOT") +addSbtPlugin("org.apache.pekko" % "pekko-grpc-sbt-plugin" % "1.0.0") // templating addSbtPlugin("io.spray" % "sbt-boilerplate" % "0.6.1") From 712c4235c3a527e9a1bae03f2e2d8d249a6f669b Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 21 Aug 2023 09:04:39 +0100 Subject: [PATCH 38/90] update release notes (1.0.0) (#229) * update release notes (1.0.0) * publish 1.0 docs * fix snapshot docs --- .github/workflows/publish-1.0-docs.yml | 65 +++++++++++++++++++ .github/workflows/publish-nightly.yml | 2 +- docs/src/main/paradox/other-docs/snapshots.md | 18 +++-- docs/src/main/paradox/release-notes/index.md | 49 +++++++++++++- 4 files changed, 122 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/publish-1.0-docs.yml diff --git a/.github/workflows/publish-1.0-docs.yml b/.github/workflows/publish-1.0-docs.yml new file mode 100644 index 000000000..cc52e1dd7 --- /dev/null +++ b/.github/workflows/publish-1.0-docs.yml @@ -0,0 +1,65 @@ +name: Publish 1.0 docs + +on: + workflow_dispatch: + +jobs: + publish: + # runs on main repo only + if: github.repository == 'apache/incubator-pekko-connectors' + name: Publish + runs-on: ubuntu-20.04 + env: + JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves + fetch-depth: 0 + + - name: Setup Java 8 + uses: actions/setup-java@v3 + with: + distribution: temurin + java-version: 8 + + - name: Build Documentation + run: |- + sbt "set ThisBuild / version := \"1.0.0\"; docs/paradox unidoc" + + # Create directory structure upfront since rsync does not create intermediate directories otherwise + - name: Create directory structure + run: |- + mkdir -p 
target/nightly-docs/docs/pekko-connectors/1.0.0/ + mkdir -p target/nightly-docs/docs/pekko-connectors/1.0/ + cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-connectors/1.0.0/docs + cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-connectors/1.0/docs + rm -r docs/target/paradox/site/main/ + cp -r target/scala-2.13/unidoc target/nightly-docs/docs/pekko-connectors/1.0.0/api + cp -r target/scala-2.13/unidoc target/nightly-docs/docs/pekko-connectors/1.0/api + rm -r target/scala-2.13/unidoc + + - name: Upload 1.0.x docs + uses: ./.github/actions/sync-nightlies + with: + upload: true + switches: --archive --compress --update --delete --progress --relative + local_path: target/nightly-docs/./docs/pekko-connectors/1.0.0 # The intermediate dot is to show `--relative` which paths to operate on + remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/ + remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }} + remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }} + remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }} + remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }} + + - name: Upload 1.0 docs + uses: ./.github/actions/sync-nightlies + with: + upload: true + switches: --archive --compress --update --delete --progress --relative + local_path: target/nightly-docs/./docs/pekko-connectors/1.0 # The intermediate dot is to show `--relative` which paths to operate on + remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/ + remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }} + remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }} + remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }} + remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }} diff --git a/.github/workflows/publish-nightly.yml b/.github/workflows/publish-nightly.yml index e945c799c..23b6072e5 100644 --- a/.github/workflows/publish-nightly.yml +++ b/.github/workflows/publish-nightly.yml @@ -48,7 +48,7 @@ jobs: with: upload: true switches: --archive --compress --update --delete --progress --relative - local_path: target/nightly-docs/./docs/pekko-connectors/ # The intermediate dot is to show `--relative` which paths to operate on + local_path: target/nightly-docs/./docs/pekko-connectors/${{ github.ref_name }}-snapshot # The intermediate dot is to show `--relative` which paths to operate on remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/ remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }} remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }} diff --git a/docs/src/main/paradox/other-docs/snapshots.md b/docs/src/main/paradox/other-docs/snapshots.md index f1169f90f..65d7ef50c 100644 --- a/docs/src/main/paradox/other-docs/snapshots.md +++ b/docs/src/main/paradox/other-docs/snapshots.md @@ -1,7 +1,4 @@ -# Snapshots - -[snapshots-badge]: https://img.shields.io/nexus/s/org.pekko/pekko-connectors-csv_2.13?server=https%3A%2F%2Foss.sonatype.org -[snapshots]: https://oss.sonatype.org/content/repositories/snapshots/com/lightbend/akka/pekko-connectors-csv_2.13/ +# Snapshots Snapshots are published to the Sonatype Snapshot repository after every successful build on 'main' branch. Add the following to your project build definition to resolve Apache Pekko Connectors snapshots: @@ -15,8 +12,8 @@ Maven snapshots-repo - Sonatype snapshots - https://oss.sonatype.org/content/repositories/snapshots + Apache snapshots + https://repository.apache.org/content/groups/snapshots ... 
@@ -25,14 +22,15 @@ Maven sbt : ```scala - resolvers += Resolver.sonatypeRepo("snapshots") + resolvers += "Apache Staging" at "https://repository.apache.org/content/groups/snapshots" + resolvers += Resolver.ApacheMavenSnapshotsRepo // use this if you are using sbt 1.9.0 or above ``` Gradle : ```gradle repositories { maven { - url "https://oss.sonatype.org/content/repositories/snapshots" + url "https://repository.apache.org/content/groups/snapshots" } } ``` @@ -44,6 +42,6 @@ The [snapshot documentation](https://pekko.apache.org/docs/pekko-connectors/snap ## Versions -Latest published snapshot version is [![snapshots-badge][]][snapshots] +To find the latest published snapshot version, have a look at https://repository.apache.org/content/groups/snapshots/org/apache/pekko/pekko-connectors-csv_2.13/ -The snapshot repository is cleaned from time to time with no further notice. Check [Sonatype snapshots Apache Pekko Connectors Kafka files](https://oss.sonatype.org/content/repositories/snapshots/com/lightbend/akka/) to see what versions are currently available. +The snapshot repository is cleaned from time to time with no further notice. Check https://repository.apache.org/content/groups/snapshots/org/apache/pekko/ to see what versions are currently available. diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md index b9f65ac1b..97970f842 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -2,6 +2,53 @@ To understand the forces on version numbers, read about @ref:[Apache Pekko Connectors' versioning scheme](../other-docs/versioning.md). -@@toc { depth=2 } +## 1.0.0 + +Apache Pekko Connectors 1.0.0 is based on Alpakka 4.0.0. Pekko came about as a result of Lightbend's decision to make future +Akka releases under a [Business Software License](https://www.lightbend.com/blog/why-we-are-changing-the-license-for-akka), +a license that is not compatible with Open Source usage. + +Apache Pekko has changed the package names, among other changes. An example package name change is that the +Pekko Connectors equivalent of `akka.stream.alpakka.jms` is `org.apache.pekko.stream.connectors.jms`. +The `akka` part is replaced by `org.apache.pekko` and the `alpakka` part is replaced by `connectors`. + +Config names that started with `akka` have changed to +use `pekko` instead. Config names that started with `alpakka` have changed to use `pekko.connectors`. + +Users switching from Akka to Pekko should read our [Pekko Migration Guide](https://pekko.apache.org/docs/pekko/current/project/migration-guides.html). + +Generally, we have tried to make it as easy as possible to switch existing Akka based projects over to using Pekko. + +We have gone through the code base and have tried to properly acknowledge all third party source code in the +Apache Pekko code base. If anyone believes that there are any instances of third party source code that is not +properly acknowledged, please get in touch. + +### Bug Fixes +We haven't had to fix many bugs that were in Alpakka 4.0.0. 
+ +* Fix some cases where functions were accidentally calling themselves, leading to infinite recursion + * [PR142](https://github.com/apache/incubator-pekko-connectors/pull/142) + * [PR164](https://github.com/apache/incubator-pekko-connectors/pull/164) + * [PR186](https://github.com/apache/incubator-pekko-connectors/pull/186) +* S3 Connector: Force US_EAST_1 for listBuckets call ([PR66](https://github.com/apache/incubator-pekko-connectors/pull/66)) +* S3 Connector: Only pass SSE headers for multipart upload requests ([PR81](https://github.com/apache/incubator-pekko-connectors/pull/81)) + +### Additions +* Add back Scala 2.12 support ([PR65](https://github.com/apache/incubator-pekko-connectors/pull/65)) +* Scala 3 support ([126](https://github.com/apache/incubator-pekko-connectors/issues/126)) + * The connectors that still only support Scala 2 are Geode, MongoDB and Slick. +* FTP Connector now supports UTF8 Autodetect mode ([PR221](https://github.com/apache/incubator-pekko-connectors/pull/221)) +* IronMQ Connector: changed the Circe JSON integration to use [mdedetrich/pekko-streams-circe](https://github.com/mdedetrich/pekko-streams-circe) ([PR134](https://github.com/apache/incubator-pekko-connectors/pull/134)) +* S3 Connector: Add Bucket With Versioning API support ([PR84](https://github.com/apache/incubator-pekko-connectors/pull/84)) + +### Dependency Upgrades +We have tried to limit the changes to third party dependencies that are used in Pekko HTTP 1.0.0. These are some exceptions: + +* Cassandra Driver 4.15.0 ([PR100](https://github.com/apache/incubator-pekko-connectors/pull/100)) +* jackson 2.14.3 +* scalatest 3.2.14. Pekko users who have existing tests based on Akka Testkit may need to migrate their tests due to the scalatest upgrade. The [scalatest 3.2 release notes](https://www.scalatest.org/release_notes/3.2.0) have a detailed description of the changes needed. 
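A quick sketch of what the package and configuration renames described earlier in these release notes look like from a user's perspective (the JMS `JmsConsumer` import and the S3 settings path are used here purely as assumed examples; check the individual connector docs for the exact names):

```scala
// Before (Alpakka): import akka.stream.alpakka.jms.scaladsl.JmsConsumer
// After (Pekko Connectors): the `akka` prefix becomes `org.apache.pekko`
// and the `alpakka` part becomes `connectors`.
import org.apache.pekko.stream.connectors.jms.scaladsl.JmsConsumer

// Configuration prefixes move the same way, e.g. in application.conf
// (S3 connector settings assumed as the example):
//   alpakka.s3.buffer = "memory"            // old
//   pekko.connectors.s3.buffer = "memory"   // new
```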
+ + +## Extra Documentation * [Alpakka Release Notes](https://doc.akka.io/docs/alpakka/current/release-notes/index.html) From d2b6e3d53df3a52066a6ba041c6361e3b5ecbd2f Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 21 Aug 2023 10:37:39 +0100 Subject: [PATCH 39/90] fix issue in publish-1.0-docs.yml (#232) --- .github/workflows/publish-1.0-docs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-1.0-docs.yml b/.github/workflows/publish-1.0-docs.yml index cc52e1dd7..583304679 100644 --- a/.github/workflows/publish-1.0-docs.yml +++ b/.github/workflows/publish-1.0-docs.yml @@ -26,7 +26,7 @@ jobs: - name: Build Documentation run: |- - sbt "set ThisBuild / version := \"1.0.0\"; docs/paradox unidoc" + sbt "set ThisBuild / version := \"1.0.0\"; docs/paradox; unidoc" # Create directory structure upfront since rsync does not create intermediate directories otherwise - name: Create directory structure From 1241de68ee22eaea719cf0929b2e1c186994dee1 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 21 Aug 2023 11:01:27 +0100 Subject: [PATCH 40/90] Upgrade protobuf java due to CVEs (#230) * update release notes (1.0.0) * use protobuf-java 3.21.12 --- docs/src/main/paradox/release-notes/index.md | 3 ++- project/Dependencies.scala | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md index 97970f842..1916247a9 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -35,7 +35,7 @@ We haven't had to fix many bugs that were in Alpakka 4.0.0. ### Additions * Add back Scala 2.12 support ([PR65](https://github.com/apache/incubator-pekko-connectors/pull/65)) -* Scala 3 support ([126](https://github.com/apache/incubator-pekko-connectors/issues/126)) +* Scala 3 support ([#126](https://github.com/apache/incubator-pekko-connectors/issues/126)) * The connectors that still only support Scala 2 are Geode, MongoDB and Slick. * FTP Connector now supports UTF8 Autodetect mode ([PR221](https://github.com/apache/incubator-pekko-connectors/pull/221)) * IronMQ Connector: changed the Circe JSON integration to use [mdedetrich/pekko-streams-circe](https://github.com/mdedetrich/pekko-streams-circe) ([PR134](https://github.com/apache/incubator-pekko-connectors/pull/134)) @@ -45,6 +45,7 @@ We haven't had to fix many bugs that were in Alpakka 4.0.0. We have tried to limit the changes to third party dependencies that are used in Pekko HTTP 1.0.0. These are some exceptions: * Cassandra Driver 4.15.0 ([PR100](https://github.com/apache/incubator-pekko-connectors/pull/100)) +* protobuf 3.21.12 ([#222](https://github.com/apache/incubator-pekko-connectors/issues/222)) * jackson 2.14.3 * scalatest 3.2.14. Pekko users who have existing tests based on Akka Testkit may need to migrate their tests due to the scalatest upgrade. The [scalatest 3.2 release notes](https://www.scalatest.org/release_notes/3.2.0) have a detailed description of the changes needed. 
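The `Dependencies.scala` change that follows pins `protobuf-java` explicitly for the affected connectors. A downstream sbt build that needs the same CVE fix before picking up a new release could apply an equivalent override; a minimal sketch, assuming the version used in this patch:

```scala
// build.sbt (sketch): force the patched protobuf-java across the whole dependency graph.
ThisBuild / dependencyOverrides += "com.google.protobuf" % "protobuf-java" % "3.21.12"
```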
diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 140406c08..9ec4c475f 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -33,6 +33,7 @@ object Dependencies { val ScalaTestVersion = "3.2.14" val TestContainersScalaTestVersion = "0.40.14" val mockitoVersion = "4.2.0" // check even https://github.com/scalatest/scalatestplus-mockito/releases + val protobufJavaVersion = "3.21.12" val hoverflyVersion = "0.14.1" val scalaCheckVersion = "1.16.0" @@ -211,6 +212,7 @@ object Dependencies { "org.apache.avro" % "avro" % "1.9.2" % "provided", "org.apache.arrow" % "arrow-vector" % "4.0.0" % "provided", "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, + "com.google.protobuf" % "protobuf-java" % protobufJavaVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-core" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, @@ -231,6 +233,7 @@ object Dependencies { "com.google.cloud" % "google-cloud-pubsub" % "1.112.5" % "protobuf-src", "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, "com.google.auth" % "google-auth-library-oauth2-http" % "0.22.2", + "com.google.protobuf" % "protobuf-java" % protobufJavaVersion, // pull in Pekko Discovery for our Pekko version "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) From a4ecf0ab431c048cea7053e88688c438db5fdfc4 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 21 Aug 2023 11:58:03 +0100 Subject: [PATCH 41/90] retry mqtt stream test that fails a lot (#196) * increase mqtt test timeout * Update MqttSessionSpec.scala --- .../scala/docs/scaladsl/MqttSessionSpec.scala | 252 +++++++++--------- 1 file changed, 129 insertions(+), 123 deletions(-) diff --git a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala index 854768115..237f301fa 100644 --- a/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala +++ b/mqtt-streaming/src/test/scala/docs/scaladsl/MqttSessionSpec.scala @@ -31,8 +31,8 @@ import pekko.stream.OverflowStrategy import pekko.testkit._ import pekko.util.{ ByteString, Timeout } import org.scalatest.BeforeAndAfterAll -import org.scalatest.concurrent.ScalaFutures -import org.scalatest.time.{ Millis, Span } +import org.scalatest.concurrent.{ Eventually, ScalaFutures } +import org.scalatest.time.{ Millis, Minutes, Span } import scala.concurrent.{ ExecutionContext, Promise } import scala.concurrent.duration._ @@ -40,12 +40,15 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import org.slf4j.LoggerFactory +import scala.util.Right + class MqttSessionSpec extends TestKit(ActorSystem("mqtt-spec")) with AnyWordSpecLike with BeforeAndAfterAll with ScalaFutures with Matchers + with Eventually with LogCapturing { val log = LoggerFactory.getLogger(classOf[MqttSessionSpec]) @@ -1919,156 +1922,159 @@ class MqttSessionSpec // longer patience needed since Akka 2.6 implicit val patienceConfig: PatienceConfig = PatienceConfig(scaled(1.second), scaled(50.millis)) - val serverSession = ActorMqttServerSession(settings.withProducerPubAckRecTimeout(10.millis)) - - val client1 = TestProbe() - val toClient1 = Sink.foreach[ByteString](bytes => client1.ref ! 
bytes) - val (client1Connection, fromClient1) = Source - .queue[ByteString](1, OverflowStrategy.dropHead) - .toMat(BroadcastHub.sink)(Keep.both) - .run() - - val pipeToClient1 = Flow.fromSinkAndSource(toClient1, fromClient1) - - val client2 = TestProbe() - val toClient2 = Sink.foreach[ByteString](bytes => client2.ref ! bytes) - val (client2Connection, fromClient2) = Source - .queue[ByteString](0, OverflowStrategy.dropHead) - .toMat(BroadcastHub.sink)(Keep.both) - .run() - - val pipeToClient2 = Flow.fromSinkAndSource(toClient2, fromClient2) + // https://github.com/apache/incubator-pekko-connectors/issues/148 + eventually(timeout(Span(1, Minutes))) { + val serverSession = ActorMqttServerSession(settings.withProducerPubAckRecTimeout(10.millis)) - val clientId = "some-client-id" - - val connect = Connect(clientId, ConnectFlags.None) - val connect1Received = Promise[Done]() - val connect2Received = Promise[Done]() - - val subscribe = Subscribe("some-topic") - val subscribe1Received = Promise[Done]() - val subscribe2Received = Promise[Done]() - - val pubAckReceived = Promise[Done]() + val client1 = TestProbe() + val toClient1 = Sink.foreach[ByteString](bytes => client1.ref ! bytes) + val (client1Connection, fromClient1) = Source + .queue[ByteString](1, OverflowStrategy.dropHead) + .toMat(BroadcastHub.sink)(Keep.both) + .run() - val disconnect = Disconnect - val disconnectReceived = Promise[Done]() + val pipeToClient1 = Flow.fromSinkAndSource(toClient1, fromClient1) - val serverConnection1 = - Source - .queue[Command[Nothing]](1, OverflowStrategy.fail) - .via( - Mqtt - .serverSessionFlow(serverSession, ByteString.empty) - .join(pipeToClient1)) - .wireTap(Sink.foreach[Either[DecodeError, Event[_]]] { - case Right(Event(`connect`, _)) => - connect1Received.success(Done) - case Right(Event(cp: Subscribe, _)) if cp.topicFilters == subscribe.topicFilters => - subscribe1Received.success(Done) - case Right(Event(`disconnect`, _)) => - disconnectReceived.success(Done) - case other => fail(s"didn't match `$other`") - }) - .toMat(Sink.seq)(Keep.left) + val client2 = TestProbe() + val toClient2 = Sink.foreach[ByteString](bytes => client2.ref ! 
bytes) + val (client2Connection, fromClient2) = Source + .queue[ByteString](0, OverflowStrategy.dropHead) + .toMat(BroadcastHub.sink)(Keep.both) .run() - val connectBytes = connect.encode(ByteString.newBuilder).result() - val connAck = ConnAck(ConnAckFlags.None, ConnAckReturnCode.ConnectionAccepted) - val connAckBytes = connAck.encode(ByteString.newBuilder).result() + val pipeToClient2 = Flow.fromSinkAndSource(toClient2, fromClient2) - val subscribeBytes = subscribe.encode(ByteString.newBuilder, PacketId(1)).result() - val subAck = SubAck(PacketId(1), List(ControlPacketFlags.QoSAtLeastOnceDelivery)) - val subAckBytes = subAck.encode(ByteString.newBuilder).result() + val clientId = "some-client-id" - val publish = Publish("some-topic", ByteString("some-payload")) - val publishBytes = publish.encode(ByteString.newBuilder, Some(PacketId(1))).result() - val dupPublishBytes = publish - .copy(flags = publish.flags | ControlPacketFlags.DUP) - .encode(ByteString.newBuilder, Some(PacketId(1))) - .result() - val pubAck = PubAck(PacketId(1)) - val pubAckBytes = pubAck.encode(ByteString.newBuilder).result() + val connect = Connect(clientId, ConnectFlags.None) + val connect1Received = Promise[Done]() + val connect2Received = Promise[Done]() - val disconnectBytes = disconnect.encode(ByteString.newBuilder).result() + val subscribe = Subscribe("some-topic") + val subscribe1Received = Promise[Done]() + val subscribe2Received = Promise[Done]() - client1Connection.offer(connectBytes) + val pubAckReceived = Promise[Done]() - connect1Received.future.futureValue shouldBe Done + val disconnect = Disconnect + val disconnectReceived = Promise[Done]() - serverConnection1.offer(Command(connAck)) - client1.expectMsg(connAckBytes) + val serverConnection1 = + Source + .queue[Command[Nothing]](1, OverflowStrategy.fail) + .via( + Mqtt + .serverSessionFlow(serverSession, ByteString.empty) + .join(pipeToClient1)) + .wireTap(Sink.foreach[Either[DecodeError, Event[_]]] { + case Right(Event(`connect`, _)) => + connect1Received.success(Done) + case Right(Event(cp: Subscribe, _)) if cp.topicFilters == subscribe.topicFilters => + subscribe1Received.success(Done) + case Right(Event(`disconnect`, _)) => + disconnectReceived.success(Done) + case other => fail(s"didn't match `$other`") + }) + .toMat(Sink.seq)(Keep.left) + .run() - client1Connection.offer(subscribeBytes) + val connectBytes = connect.encode(ByteString.newBuilder).result() + val connAck = ConnAck(ConnAckFlags.None, ConnAckReturnCode.ConnectionAccepted) + val connAckBytes = connAck.encode(ByteString.newBuilder).result() - subscribe1Received.future.futureValue shouldBe Done + val subscribeBytes = subscribe.encode(ByteString.newBuilder, PacketId(1)).result() + val subAck = SubAck(PacketId(1), List(ControlPacketFlags.QoSAtLeastOnceDelivery)) + val subAckBytes = subAck.encode(ByteString.newBuilder).result() - serverConnection1.offer(Command(subAck)) - client1.expectMsg(subAckBytes) + val publish = Publish("some-topic", ByteString("some-payload")) + val publishBytes = publish.encode(ByteString.newBuilder, Some(PacketId(1))).result() + val dupPublishBytes = publish + .copy(flags = publish.flags | ControlPacketFlags.DUP) + .encode(ByteString.newBuilder, Some(PacketId(1))) + .result() + val pubAck = PubAck(PacketId(1)) + val pubAckBytes = pubAck.encode(ByteString.newBuilder).result() - serverSession ! 
Command(publish) - client1.expectMsg(publishBytes) + val disconnectBytes = disconnect.encode(ByteString.newBuilder).result() - // Perform an explicit disconnect otherwise, if for example, we - // just completed the client connection, the session may receive - // the associated ConnectionLost signal for the new connection - // given that the new connection occurs so quickly. - client1Connection.offer(disconnectBytes) + client1Connection.offer(connectBytes) - disconnectReceived.future.futureValue shouldBe Done + connect1Received.future.futureValue shouldBe Done - val serverConnection2 = - Source - .queue[Command[Nothing]](1, OverflowStrategy.fail) - .via( - Mqtt - .serverSessionFlow(serverSession, ByteString.empty) - .join(pipeToClient2)) - .wireTap(Sink.foreach[Either[DecodeError, Event[_]]] { - case Right(Event(`connect`, _)) => - connect2Received.success(Done) - case Right(Event(cp: Subscribe, _)) if cp.topicFilters == subscribe.topicFilters => - subscribe2Received.success(Done) - case Right(Event(_: PubAck, _)) => - pubAckReceived.success(Done) - case other => fail(s"didn't match `$other`") - }) - .toMat(Sink.seq)(Keep.left) - .run() + serverConnection1.offer(Command(connAck)) + client1.expectMsg(connAckBytes) - client2Connection.offer(connectBytes) + client1Connection.offer(subscribeBytes) - connect2Received.future.futureValue shouldBe Done + subscribe1Received.future.futureValue shouldBe Done - serverConnection2.offer(Command(connAck)) - client2.expectMsg(6.seconds, connAckBytes) + serverConnection1.offer(Command(subAck)) + client1.expectMsg(subAckBytes) - client2Connection.offer(subscribeBytes) + serverSession ! Command(publish) + client1.expectMsg(publishBytes) - subscribe2Received.future.futureValue shouldBe Done + // Perform an explicit disconnect otherwise, if for example, we + // just completed the client connection, the session may receive + // the associated ConnectionLost signal for the new connection + // given that the new connection occurs so quickly. 
+ client1Connection.offer(disconnectBytes) - serverConnection2.offer(Command(subAck)) + disconnectReceived.future.futureValue shouldBe Done - client2.fishForMessage(3.seconds.dilated) { - case msg: ByteString if msg == dupPublishBytes => true - case _ => false - } + val serverConnection2 = + Source + .queue[Command[Nothing]](1, OverflowStrategy.fail) + .via( + Mqtt + .serverSessionFlow(serverSession, ByteString.empty) + .join(pipeToClient2)) + .wireTap(Sink.foreach[Either[DecodeError, Event[_]]] { + case Right(Event(`connect`, _)) => + connect2Received.success(Done) + case Right(Event(cp: Subscribe, _)) if cp.topicFilters == subscribe.topicFilters => + subscribe2Received.success(Done) + case Right(Event(_: PubAck, _)) => + pubAckReceived.success(Done) + case other => fail(s"didn't match `$other`") + }) + .toMat(Sink.seq)(Keep.left) + .run() + + client2Connection.offer(connectBytes) + + connect2Received.future.futureValue shouldBe Done + + serverConnection2.offer(Command(connAck)) + client2.expectMsg(6.seconds, connAckBytes) + + client2Connection.offer(subscribeBytes) + + subscribe2Received.future.futureValue shouldBe Done + + serverConnection2.offer(Command(subAck)) + + client2.fishForMessage(3.seconds.dilated) { + case msg: ByteString if msg == dupPublishBytes => true + case _ => false + } - client2Connection.offer(pubAckBytes) - pubAckReceived.future.futureValue shouldBe Done + client2Connection.offer(pubAckBytes) + pubAckReceived.future.futureValue shouldBe Done - client1Connection.complete() - client2Connection.complete() - serverConnection1.complete() - serverConnection2.complete() + client1Connection.complete() + client2Connection.complete() + serverConnection1.complete() + serverConnection2.complete() - for { - _ <- client1Connection.watchCompletion() - _ <- client2Connection.watchCompletion() - _ <- serverConnection1.watchCompletion() - _ <- serverConnection2.watchCompletion() - } serverSession.shutdown() + for { + _ <- client1Connection.watchCompletion() + _ <- client2Connection.watchCompletion() + _ <- serverConnection1.watchCompletion() + _ <- serverConnection2.watchCompletion() + } serverSession.shutdown() + } } } From f51c4e0682de4ce85673336dd3df6e57a8017d6f Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 21 Aug 2023 12:37:16 +0100 Subject: [PATCH 42/90] broken link in overview.md (#233) * broken link in overview.md * Update overview.md --- docs/src/main/paradox/overview.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/src/main/paradox/overview.md b/docs/src/main/paradox/overview.md index ea2d13cf3..7ef5e3780 100644 --- a/docs/src/main/paradox/overview.md +++ b/docs/src/main/paradox/overview.md @@ -29,12 +29,7 @@ Please feel free to contribute to Apache Pekko Connectors by reporting issues yo We want Apache Pekko and Apache Pekko Connectors to strive in a welcoming and open atmosphere and expect all contributors to respect our [code of conduct](https://www.apache.org/foundation/policies/conduct.html). -[![pekko-connectors]][pekko-connectors-scaladex] Feel free to tag your project with *pekko-connectors* keyword in Scaladex for easier discoverability. - -[pekko-connectors]: https://index.scala-lang.org/count.svg?q=topics:pekko-streams&subject=pekko-connectors&style=flat-square - -[pekko-connectors-scaladex]: https://index.scala-lang.org/search?q=topics:pekko-connectors - +Feel free to tag your project with *pekko-connectors* keyword in [Scaladex](https://index.scala-lang.org/search?topics=pekko-connnectors) for easier discoverability. 
@@ toc { .main depth=2 } From b815b4eaa1789639ad0ea31b48512ceb5020e12f Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Mon, 21 Aug 2023 15:52:51 +0200 Subject: [PATCH 43/90] Add TODO note for Scala 3 workaround --- .../org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala index 78c47caa6..5fba9b443 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala @@ -1177,6 +1177,7 @@ import scala.util.{ Failure, Success, Try } import conf.multipartUploadSettings.retrySettings._ + // TODO: Scala 3 workaround, see https://github.com/lampepfl/dotty/issues/18438 val source1: SubFlow[Chunk, NotUsed, Flow[ByteString, ByteString, NotUsed]#Repr, Sink[ByteString, NotUsed]] = SplitAfterSize(chunkSize, chunkBufferSize)(atLeastOneByteString) .via(getChunkBuffer(chunkSize, chunkBufferSize, maxRetries)) // creates the chunks From 5496b36809a05d56d114126e2a24538b8b9c2b45 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sat, 10 Jun 2023 12:33:09 +0200 Subject: [PATCH 44/90] Add Scala3 support for Geode Co-Authored-By: Matt Dziuban --- build.sbt | 2 + docs/src/main/paradox/geode.md | 4 +- .../geode/impl/pdx/ObjectDecoder.scala | 44 +++++++++++++ .../geode/impl/pdx/ObjectEncoder.scala | 40 ++++++++++++ .../impl/pdx/LabelledGenericGeneric.scala | 64 +++++++++++++++++++ .../geode/impl/pdx/ObjectDecoder.scala | 49 ++++++++++++++ .../geode/impl/pdx/ObjectEncoder.scala | 44 +++++++++++++ .../geode/impl/pdx/PdxDecoder.scala | 32 +--------- .../geode/impl/pdx/PdxEncoder.scala | 30 +-------- .../geode/impl/pdx/PdxWriterMock.scala | 2 +- project/Dependencies.scala | 12 ++-- 11 files changed, 260 insertions(+), 63 deletions(-) create mode 100644 geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala create mode 100644 geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala create mode 100644 geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala create mode 100644 geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala create mode 100644 geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala diff --git a/build.sbt b/build.sbt index 15e782c70..30bfcb0d9 100644 --- a/build.sbt +++ b/build.sbt @@ -186,6 +186,8 @@ lazy val geode = List("-Xlint:-byname-implicit") case Some((2, n)) if n == 12 => List.empty + case Some((3, _)) => + List.empty } }) diff --git a/docs/src/main/paradox/geode.md b/docs/src/main/paradox/geode.md index 04d9d3070..522e9dbd7 100644 --- a/docs/src/main/paradox/geode.md +++ b/docs/src/main/paradox/geode.md @@ -69,7 +69,9 @@ Java : @@snip [snip](/geode/src/test/java/docs/javadsl/PersonPdxSerializer.java) { #person-pdx-serializer } -This Apache Pekko Connectors Geode provides a generic solution for Scala users based on [Shapeless](https://github.com/milessabin/shapeless) which may generate serializers for case classes at compile time. 
+This Apache Pekko Connectors Geode provides a generic solution for Scala users based on [Shapeless](https://github.com/milessabin/shapeless) for Scala 2 and +for Scala 3 using the [built-in tuple generic metaprogramming](https://www.scala-lang.org/2021/02/26/tuples-bring-generic-programming-to-scala-3.html) +which may generate serializers for case classes at compile time. Java users need to implement custom serializers manually, or use runtime reflection as described in @extref[Using Automatic Reflection-Based PDX Serialization](geode:/developing/data_serialization/auto_serialization.html). diff --git a/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala b/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala new file mode 100644 index 000000000..2fd6852b3 --- /dev/null +++ b/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. + */ + +package org.apache.pekko.stream.connectors.geode.impl.pdx + +import org.apache.pekko.annotation.InternalApi +import shapeless._ +import shapeless.labelled._ + +import scala.util.{ Failure, Success } + +@InternalApi +private[pekko] trait ObjectDecoder { + + implicit val hnilDecoder: PdxDecoder[HNil] = PdxDecoder.instance((_, _) => Success(HNil)) + + implicit def hlistDecoder[K <: Symbol, H, T <: HList]( + implicit witness: Witness.Aux[K], + hDecoder: Lazy[PdxDecoder[H]], + tDecoder: Lazy[PdxDecoder[T]]): PdxDecoder[FieldType[K, H] :: T] = PdxDecoder.instance { + case (reader, fieldName) => { + val headField = hDecoder.value.decode(reader, witness.value) + val tailFields = tDecoder.value.decode(reader, fieldName) + (headField, tailFields) match { + case (Success(h), Success(t)) => Success(field[K](h) :: t) + case _ => Failure(null) + } + } + case e => Failure(null) + } + + implicit def objectDecoder[A, Repr <: HList]( + implicit gen: LabelledGeneric.Aux[A, Repr], + hlistDecoder: PdxDecoder[Repr]): PdxDecoder[A] = PdxDecoder.instance { (reader, fieldName) => + hlistDecoder.decode(reader, fieldName).map(gen.from) + } + +} diff --git a/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala b/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala new file mode 100644 index 000000000..794e6c50f --- /dev/null +++ b/geode/src/main/scala-2/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, derived from Akka. 
+ */ + +package org.apache.pekko.stream.connectors.geode.impl.pdx + +import org.apache.pekko.annotation.InternalApi +import shapeless._ +import shapeless.labelled._ +import shapeless.ops.hlist.IsHCons + +@InternalApi +private[pekko] trait ObjectEncoder { + + implicit val hnilEncoder: PdxEncoder[HNil] = + PdxEncoder.instance[HNil] { case _ => true } + implicit def hlistEncoder[K <: Symbol, H, T <: shapeless.HList]( + implicit witness: Witness.Aux[K], + isHCons: IsHCons.Aux[H :: T, H, T], + hEncoder: Lazy[PdxEncoder[H]], + tEncoder: Lazy[PdxEncoder[T]]): PdxEncoder[FieldType[K, H] :: T] = + PdxEncoder.instance[FieldType[K, H] :: T] { + case (writer, o, fieldName) => + hEncoder.value.encode(writer, isHCons.head(o), witness.value) + tEncoder.value.encode(writer, isHCons.tail(o), fieldName) + } + + implicit def objectEncoder[A, Repr <: HList]( + implicit gen: LabelledGeneric.Aux[A, Repr], + hlistEncoder: Lazy[PdxEncoder[Repr]]): PdxEncoder[A] = PdxEncoder.instance { + case (writer, o, fieldName) => + hlistEncoder.value.encode(writer, gen.to(o), fieldName) + } + +} diff --git a/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala new file mode 100644 index 000000000..835aac0e1 --- /dev/null +++ b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/LabelledGenericGeneric.scala @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.pekko.stream.connectors.geode.impl.pdx + +import org.apache.pekko.annotation.InternalApi + +import scala.deriving.Mirror + +@InternalApi +private[pekko] type FieldType[K, +V] = V with KeyTag[K, V] + +@InternalApi +private[pekko] object FieldType { + def label[K]: [V] => V => FieldType[K, V] = [V] => (v: V) => v.asInstanceOf[FieldType[K, V]] +} + +@InternalApi +private[pekko] type KeyTag[K, +V] + +@InternalApi +private[pekko] type ZipWith[T1 <: Tuple, T2 <: Tuple, F[_, _]] <: Tuple = (T1, T2) match { + case (h1 *: t1, h2 *: t2) => F[h1, h2] *: ZipWith[t1, t2, F] + case (EmptyTuple, ?) 
=> EmptyTuple + case (?, EmptyTuple) => EmptyTuple + case _ => Tuple +} + +@InternalApi +private[pekko] trait LabelledGeneric[A] { + type Repr + def from(r: Repr): A + def to(a: A): Repr +} + +@InternalApi +private[pekko] object LabelledGeneric { + type Aux[A, R] = LabelledGeneric[A] { type Repr = R } + + inline def apply[A](using l: LabelledGeneric[A]): LabelledGeneric.Aux[A, l.Repr] = l + + inline given productInst[A <: Product]( + using m: Mirror.ProductOf[A]) + : LabelledGeneric.Aux[A, ZipWith[m.MirroredElemLabels, m.MirroredElemTypes, FieldType]] = + new LabelledGeneric[A] { + type Repr = Tuple & ZipWith[m.MirroredElemLabels, m.MirroredElemTypes, FieldType] + def from(r: Repr): A = m.fromTuple(r.asInstanceOf[m.MirroredElemTypes]) + def to(a: A): Repr = Tuple.fromProductTyped(a).asInstanceOf[Repr] + } +} diff --git a/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala new file mode 100644 index 000000000..a586e7ed6 --- /dev/null +++ b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectDecoder.scala @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.pekko.stream.connectors.geode.impl.pdx + +import org.apache.pekko.annotation.InternalApi + +import scala.util.{ Failure, Success } + +@InternalApi +private[pekko] trait ObjectDecoder { + + given emptyTupleDecoder: PdxDecoder[EmptyTuple] = PdxDecoder.instance((_, _) => Success(EmptyTuple)) + + given tupleDecoder[K <: String, H, T <: Tuple]( + using m: ValueOf[K], + hDecoder: PdxDecoder[H], + tDecoder: PdxDecoder[T]): PdxDecoder[FieldType[K, H] *: T] = PdxDecoder.instance { + case (reader, fieldName) => { + val headField = hDecoder.decode(reader, Symbol(m.value)) + val tailFields = tDecoder.decode(reader, fieldName) + (headField, tailFields) match { + case (Success(h), Success(t)) => Success(FieldType.label[K](h) *: t) + case _ => Failure(null) + } + } + case e => Failure(null) + } + + given objectDecoder[A, Repr <: Tuple]( + using gen: LabelledGeneric.Aux[A, Repr], + tupleDecoder: PdxDecoder[Repr]): PdxDecoder[A] = PdxDecoder.instance { (reader, fieldName) => + tupleDecoder.decode(reader, fieldName).map(gen.from) + } +} diff --git a/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala new file mode 100644 index 000000000..fea86fb49 --- /dev/null +++ b/geode/src/main/scala-3/org/apache/pekko/stream/connectors/geode/impl/pdx/ObjectEncoder.scala @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.pekko.stream.connectors.geode.impl.pdx + +import org.apache.pekko.annotation.InternalApi + +@InternalApi +private[pekko] trait ObjectEncoder { + + given emptyTupleEncoder: PdxEncoder[EmptyTuple] = + PdxEncoder.instance[EmptyTuple] { case _ => true } + + given tupleEncoder[K <: String, H, T <: Tuple](using + m: ValueOf[K], + hEncoder: PdxEncoder[H], + tEncoder: PdxEncoder[T]): PdxEncoder[FieldType[K, H] *: T] = + PdxEncoder.instance[FieldType[K, H] *: T] { + case (writer, o, fieldName) => + hEncoder.encode(writer, o.head, Symbol(m.value)) + tEncoder.encode(writer, o.tail, fieldName) + } + + given objectEncoder[A, Repr <: Tuple]( + using gen: LabelledGeneric.Aux[A, Repr], + tupleEncoder: PdxEncoder[Repr]): PdxEncoder[A] = PdxEncoder.instance { + case (writer, o, fieldName) => + tupleEncoder.encode(writer, gen.to(o), fieldName) + } +} diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala index 40fea422b..f3fb18d21 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxDecoder.scala @@ -18,7 +18,7 @@ import java.util.{ Date, UUID } import org.apache.pekko.annotation.InternalApi import org.apache.geode.pdx.PdxReader -import scala.util.{ Failure, Success, Try } +import scala.util.{ Success, Try } @InternalApi trait PdxDecoder[A] { @@ -27,18 +27,13 @@ trait PdxDecoder[A] { } -object PdxDecoder { +object PdxDecoder extends ObjectDecoder { - import shapeless._ - import shapeless.labelled._ - - private def instance[A](f: (PdxReader, Symbol) => Try[A]): PdxDecoder[A] = + private[pekko] def instance[A](f: (PdxReader, Symbol) => Try[A]): PdxDecoder[A] = new PdxDecoder[A] { def decode(reader: PdxReader, fieldName: Symbol) = f(reader, fieldName) } - implicit val hnilDecoder: PdxDecoder[HNil] = instance((_, _) => Success(HNil)) - implicit val booleanDecoder: PdxDecoder[Boolean] = instance { case (reader, fieldName) => Success(reader.readBoolean(fieldName.name)) @@ -149,27 +144,6 @@ object PdxDecoder { Try(reader.readObjectArray(fieldName.name).toSet.asInstanceOf[Set[T]]) } - implicit def hlistDecoder[K <: Symbol, H, T <: HList]( - implicit witness: Witness.Aux[K], - hDecoder: Lazy[PdxDecoder[H]], - tDecoder: Lazy[PdxDecoder[T]]): PdxDecoder[FieldType[K, H] :: T] = instance { - case (reader, fieldName) => { - val headField = hDecoder.value.decode(reader, witness.value) - val tailFields = tDecoder.value.decode(reader, fieldName) - (headField, tailFields) match { - case (Success(h), Success(t)) => Success(field[K](h) :: t) - case _ => Failure(null) - } - } - case e => Failure(null) - } - - implicit def objectDecoder[A, Repr <: HList]( - implicit gen: LabelledGeneric.Aux[A, Repr], - hlistDecoder: PdxDecoder[Repr]): PdxDecoder[A] = instance { (reader, fieldName) => - hlistDecoder.decode(reader, fieldName).map(gen.from) - } - def apply[A](implicit ev: PdxDecoder[A]): PdxDecoder[A] = ev } diff --git a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxEncoder.scala b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxEncoder.scala index e9e853505..61758c503 100644 --- a/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxEncoder.scala +++ b/geode/src/main/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxEncoder.scala @@ -17,45 +17,19 @@ import 
java.util.{ Date, UUID } import org.apache.pekko.annotation.InternalApi import org.apache.geode.pdx.PdxWriter -import shapeless.ops.hlist.IsHCons @InternalApi trait PdxEncoder[A] { def encode(writer: PdxWriter, a: A, fieldName: Symbol = null): Boolean } -object PdxEncoder { +object PdxEncoder extends ObjectEncoder { - import shapeless._ - import shapeless.labelled._ - - private def instance[A](f: (PdxWriter, A, Symbol) => Boolean) = + private[pekko] def instance[A](f: (PdxWriter, A, Symbol) => Boolean) = new PdxEncoder[A] { def encode(writer: PdxWriter, a: A, fieldName: Symbol = null): Boolean = f(writer, a, fieldName) } - implicit val hnilEncoder: PdxEncoder[HNil] = - instance[HNil] { case _ => true } - - implicit def hlistEncoder[K <: Symbol, H, T <: shapeless.HList]( - implicit witness: Witness.Aux[K], - isHCons: IsHCons.Aux[H :: T, H, T], - hEncoder: Lazy[PdxEncoder[H]], - tEncoder: Lazy[PdxEncoder[T]]): PdxEncoder[FieldType[K, H] :: T] = - instance[FieldType[K, H] :: T] { - case (writer, o, fieldName) => - hEncoder.value.encode(writer, isHCons.head(o), witness.value) - tEncoder.value.encode(writer, isHCons.tail(o), fieldName) - - } - - implicit def objectEncoder[A, Repr <: HList]( - implicit gen: LabelledGeneric.Aux[A, Repr], - hlistEncoder: Lazy[PdxEncoder[Repr]]): PdxEncoder[A] = instance { - case (writer, o, fieldName) => - hlistEncoder.value.encode(writer, gen.to(o), fieldName) - } - def apply[A](implicit enc: PdxEncoder[A]): PdxEncoder[A] = enc implicit def booleanEncoder: PdxEncoder[Boolean] = instance { diff --git a/geode/src/test/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxWriterMock.scala b/geode/src/test/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxWriterMock.scala index 2152ecd89..cdd377ff4 100644 --- a/geode/src/test/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxWriterMock.scala +++ b/geode/src/test/scala/org/apache/pekko/stream/connectors/geode/impl/pdx/PdxWriterMock.scala @@ -88,5 +88,5 @@ object PdxMocks { override def writeByte(fieldName: String, value: Byte) = { println(s"Write $value"); this } } - implicit val writerMock = new WriterMock() + implicit val writerMock: WriterMock = new WriterMock() } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 9ec4c475f..d3183886c 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -8,6 +8,7 @@ */ import sbt._ +import Common.isScala3 import Keys._ object Dependencies { @@ -178,13 +179,16 @@ object Dependencies { val GeodeVersionForDocs = "115" val Geode = Seq( - crossScalaVersions -= Scala3, - libraryDependencies ++= + libraryDependencies ++= { Seq("geode-core", "geode-cq") .map("org.apache.geode" % _ % GeodeVersion) ++ Seq( - "com.chuusai" %% "shapeless" % "2.3.3", - "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies) + "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies ++ + (if (isScala3.value) + Seq.empty // Equivalent and relevant shapeless functionality has been mainlined into Scala 3 language/stdlib + else Seq( + "com.chuusai" %% "shapeless" % "2.3.10")) + }) val GoogleCommon = Seq( libraryDependencies ++= Seq( From 620ae7da288db9f670b4b1bde3dccf6db2b05011 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 22 Aug 2023 00:50:59 +0100 Subject: [PATCH 45/90] Geode supports Scala 3 (#235) --- docs/src/main/paradox/release-notes/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/main/paradox/release-notes/index.md 
b/docs/src/main/paradox/release-notes/index.md index 1916247a9..e820e18df 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -36,7 +36,7 @@ We haven't had to fix many bugs that were in Alpakka 4.0.0. ### Additions * Add back Scala 2.12 support ([PR65](https://github.com/apache/incubator-pekko-connectors/pull/65)) * Scala 3 support ([#126](https://github.com/apache/incubator-pekko-connectors/issues/126)) - * The connectors that still only support Scala 2 are Geode, MongoDB and Slick. + * The connectors that still only support Scala 2 are MongoDB and Slick. * FTP Connector now supports UTF8 Autodetect mode ([PR221](https://github.com/apache/incubator-pekko-connectors/pull/221)) * IronMQ Connector: changed the Circe JSON integration to use [mdedetrich/pekko-streams-circe](https://github.com/mdedetrich/pekko-streams-circe) ([PR134](https://github.com/apache/incubator-pekko-connectors/pull/134)) * S3 Connector: Add Bucket With Versioning API support ([PR84](https://github.com/apache/incubator-pekko-connectors/pull/84)) From be089331c1c868d2abae64aefc99f3d869837c2e Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 22 Aug 2023 00:51:22 +0100 Subject: [PATCH 46/90] fix users mailing list link (#236) --- README.md | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index eee273f92..2d8febb0b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -Apache Pekko Connectors [![scaladex-badge][]][scaladex] [![maven-central-badge][]][maven-central] [![CI on GitHub actions](https://github.com/apache/incubator-pekko-connectors/actions/workflows/check-build-test.yml/badge.svg)](https://github.com/apache/incubator-pekko-connectors/actions/workflows/check-build-test.yml)[![Nightly Builds](https://github.com/apache/incubator-pekko-connectors/actions/workflows/nightly-builds.yaml/badge.svg)](https://github.com/apache/incubator-pekko-connectors/actions/workflows/nightly-builds.yaml) -======= +# Apache Pekko Connectors [![scaladex-badge][]][scaladex] [![maven-central-badge][]][maven-central] [![CI on GitHub actions](https://github.com/apache/incubator-pekko-connectors/actions/workflows/check-build-test.yml/badge.svg)](https://github.com/apache/incubator-pekko-connectors/actions/workflows/check-build-test.yml)[![Nightly Builds](https://github.com/apache/incubator-pekko-connectors/actions/workflows/nightly-builds.yaml/badge.svg)](https://github.com/apache/incubator-pekko-connectors/actions/workflows/nightly-builds.yaml) [scaladex]: https://index.scala-lang.org/apache/incubator-pekko-connectors [scaladex-badge]: https://index.scala-lang.org/apache/incubator-pekko-connectors/latest.svg @@ -12,42 +11,36 @@ The Apache Pekko Connectors project is an open source initiative to implement st Pekko Connectors is a fork of [Alpakka](https://github.com/akka/alpakka) 4.0.0, prior to the Akka project's adoption of the Business Source License. -Documentation -------------- +## Documentation Pekko Connectors are documented at https://pekko.apache.org/docs/pekko-connectors/current/. To keep up with the latest releases check out [Pekko Connectors releases](https://github.com/apache/incubator-pekko-connectors/releases) and [Pekko Connectors Kafka releases](https://github.com/apache/incubator-pekko-connectors-kafka/releases). 
-Community ---------- +## Community You can join these forums and chats to discuss and ask Pekko and Pekko connector related questions: - [GitHub discussions](https://github.com/apache/incubator-pekko/discussions): for questions and general discussion. -- [Pekko user mailing list](https://lists.apache.org/list.html?user@pekko.apache.org): for Pekko Connectors usage discussions. +- [Pekko users mailing list](https://lists.apache.org/list.html?users@pekko.apache.org): for Pekko Connectors usage discussions. - [Pekko dev mailing list](https://lists.apache.org/list.html?dev@pekko.apache.org): for Pekko Connectors development discussions. - [GitHub issues](https://github.com/apache/incubator-pekko-connectors/issues): for bug reports and feature requests. Please search the existing issues before creating new ones. If you are unsure whether you have found a bug, consider asking in GitHub discussions or the mailing list first. -Contributing ------------- +## Contributing Contributions are very welcome. If you have an idea on how to improve Pekko, don't hesitate to create an issue or submit a pull request. See [CONTRIBUTING.md](https://github.com/apache/incubator-pekko-connectors/blob/main/CONTRIBUTING.md) for details on the development workflow and how to create your pull request. -Code of Conduct ---------------- +## Code of Conduct Apache Pekko is governed by the [Apache code of conduct](https://www.apache.org/foundation/policies/conduct.html). By participating in this project you agree to abide by its terms. -License -------- +## License Apache Pekko is available under the Apache License, version 2.0. See [LICENSE](https://github.com/apache/incubator-pekko-connectors/blob/main/LICENSE) file for details. -Caveat Emptor -------------- +## Caveat Emptor Pekko Connectors components are not always binary compatible between releases. API changes that are not backward compatible might be introduced as we refine and simplify based on your feedback. A module may be dropped in any release without prior deprecation. 
From 2a56aa7e81b0c03816300ddb3a375df1fd3874fc Mon Sep 17 00:00:00 2001 From: Sergey Gornostaev Date: Fri, 25 Aug 2023 23:06:02 +0800 Subject: [PATCH 47/90] FTP: Add for FTPS the ability to set KeyManager and TrustManager --------- Co-authored-by: Matthew de Detrich Co-authored-by: PJ Fanning --- .../connectors/ftp/impl/FtpsOperations.scala | 3 + .../pekko/stream/connectors/ftp/model.scala | 24 ++++- ...FtpsWithTrustAndKeyManagersStageSpec.scala | 96 +++++++++++++++++++ project/Dependencies.scala | 2 +- 4 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/FtpsWithTrustAndKeyManagersStageSpec.scala diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala index 4fad15ca3..5f725e05e 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/impl/FtpsOperations.scala @@ -31,6 +31,9 @@ private[ftp] trait FtpsOperations extends CommonFtpOperations { Try { connectionSettings.proxy.foreach(ftpClient.setProxy) + connectionSettings.keyManager.foreach(ftpClient.setKeyManager) + connectionSettings.trustManager.foreach(ftpClient.setTrustManager) + if (ftpClient.getAutodetectUTF8() != connectionSettings.autodetectUTF8) { ftpClient.setAutodetectUTF8(connectionSettings.autodetectUTF8) } diff --git a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala index e489d09dd..0a05dd994 100644 --- a/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala +++ b/ftp/src/main/scala/org/apache/pekko/stream/connectors/ftp/model.scala @@ -15,6 +15,8 @@ package org.apache.pekko.stream.connectors.ftp import java.net.InetAddress import java.net.Proxy +import javax.net.ssl.KeyManager +import javax.net.ssl.TrustManager import java.nio.file.attribute.PosixFilePermission import org.apache.pekko.annotation.{ DoNotInherit, InternalApi } @@ -185,7 +187,9 @@ final class FtpsSettings private ( val passiveMode: Boolean, val autodetectUTF8: Boolean, val configureConnection: FTPSClient => Unit, - val proxy: Option[Proxy]) extends FtpFileSettings { + val proxy: Option[Proxy], + val keyManager: Option[KeyManager], + val trustManager: Option[TrustManager]) extends FtpFileSettings { def withHost(value: java.net.InetAddress): FtpsSettings = copy(host = value) def withPort(value: Int): FtpsSettings = copy(port = value) @@ -196,6 +200,8 @@ final class FtpsSettings private ( def withAutodetectUTF8(value: Boolean): FtpsSettings = if (autodetectUTF8 == value) this else copy(autodetectUTF8 = value) def withProxy(value: Proxy): FtpsSettings = copy(proxy = Some(value)) + def withKeyManager(value: KeyManager): FtpsSettings = copy(keyManager = Some(value)) + def withTrustManager(value: TrustManager): FtpsSettings = copy(trustManager = Some(value)) /** * Scala API: @@ -220,7 +226,9 @@ final class FtpsSettings private ( passiveMode: Boolean = passiveMode, autodetectUTF8: Boolean = autodetectUTF8, configureConnection: FTPSClient => Unit = configureConnection, - proxy: Option[Proxy] = proxy): FtpsSettings = new FtpsSettings( + proxy: Option[Proxy] = proxy, + keyManager: Option[KeyManager] = keyManager, + trustManager: Option[TrustManager] = trustManager): FtpsSettings = new FtpsSettings( host = host, port = port, credentials = credentials, @@ -228,7 +236,9 
@@ final class FtpsSettings private ( passiveMode = passiveMode, autodetectUTF8 = autodetectUTF8, configureConnection = configureConnection, - proxy = proxy) + proxy = proxy, + keyManager = keyManager, + trustManager = trustManager) override def toString = "FtpsSettings(" + @@ -239,7 +249,9 @@ final class FtpsSettings private ( s"passiveMode=$passiveMode," + s"autodetectUTF8=$autodetectUTF8" + s"configureConnection=$configureConnection," + - s"proxy=$proxy)" + s"proxy=$proxy" + + s"keyManager=$keyManager" + + s"trustManager=$trustManager)" } /** @@ -259,7 +271,9 @@ object FtpsSettings { passiveMode = false, autodetectUTF8 = false, configureConnection = _ => (), - proxy = None) + proxy = None, + keyManager = None, + trustManager = None) /** Java API */ def create(host: java.net.InetAddress): FtpsSettings = apply( diff --git a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/FtpsWithTrustAndKeyManagersStageSpec.scala b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/FtpsWithTrustAndKeyManagersStageSpec.scala new file mode 100644 index 000000000..6fb9557b5 --- /dev/null +++ b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/FtpsWithTrustAndKeyManagersStageSpec.scala @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.pekko.stream.connectors.ftp + +import org.apache.pekko +import pekko.stream.IOResult +import pekko.stream.scaladsl.{ Sink, Source } +import pekko.util.ByteString +import pekko.{ Done, NotUsed } +import org.mockito.ArgumentMatchers.{ any, anyString } +import org.mockito.Mockito.{ atLeastOnce, doNothing, verify } +import org.scalatestplus.mockito.MockitoSugar + +import java.net.{ InetAddress, Socket } +import java.security.cert.X509Certificate +import javax.net.ssl.{ X509ExtendedKeyManager, X509ExtendedTrustManager } +import scala.concurrent.Future + +class FtpsWithTrustAndKeyManagersStageSpec extends BaseFtpsSpec with CommonFtpStageSpec with MockitoSugar { + + // The implementation of X509ExtendedTrustManager and X509ExtendedKeyManager is final so + // its not possible to put a Mockito spy on it, instead lets just mock the classes and the + // checkServerTrusted method which is executed only when trustManager/keyManager is setup in FtpsSettings + + val keyManager: X509ExtendedKeyManager = mock[X509ExtendedKeyManager] + val trustManager: X509ExtendedTrustManager = mock[X509ExtendedTrustManager] + + doNothing().when(trustManager).checkServerTrusted(any(classOf[Array[X509Certificate]]), anyString, + any(classOf[Socket])) + + override val settings = + FtpsSettings( + InetAddress.getByName(HOSTNAME)).withPort(PORT) + .withCredentials(CREDENTIALS) + .withBinary(true) + .withPassiveMode(true) + .withTrustManager(trustManager) + .withKeyManager(keyManager) + + private def verifyServerCheckCertificate(): Unit = + verify(trustManager, atLeastOnce()).checkServerTrusted(any(classOf[Array[X509Certificate]]), anyString, + any(classOf[Socket])) + + private def verifyAfterStream[O, Mat](source: Source[O, Mat]): Source[O, Mat] = + source.map { result => + verifyServerCheckCertificate() + result + } + + private def verifyAfterStream[I, Mat](sink: Sink[I, Mat]): Sink[I, Mat] = + sink.mapMaterializedValue { result => + verifyServerCheckCertificate() + result + } + + override protected def listFiles(basePath: String): Source[FtpFile, NotUsed] = + verifyAfterStream(super.listFiles(basePath)) + + override protected def listFilesWithFilter(basePath: String, branchSelector: FtpFile => Boolean, + emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = + verifyAfterStream(super.listFilesWithFilter(basePath, branchSelector, emitTraversedDirectories)) + + override protected def retrieveFromPath(path: String, fromRoot: Boolean): Source[ByteString, Future[IOResult]] = + verifyAfterStream(super.retrieveFromPath(path, fromRoot)) + + override protected def retrieveFromPathWithOffset(path: String, offset: Long): Source[ByteString, Future[IOResult]] = + verifyAfterStream(super.retrieveFromPathWithOffset(path, offset)) + + override protected def storeToPath(path: String, append: Boolean): Sink[ByteString, Future[IOResult]] = + verifyAfterStream(super.storeToPath(path, append)) + + override protected def remove(): Sink[FtpFile, Future[IOResult]] = + verifyAfterStream(super.remove()) + + override protected def move(destinationPath: FtpFile => String): Sink[FtpFile, Future[IOResult]] = + verifyAfterStream(super.move(destinationPath)) + + override protected def mkdir(basePath: String, name: String): Source[Done, NotUsed] = + verifyAfterStream(super.mkdir(basePath, name)) + +} diff --git a/project/Dependencies.scala b/project/Dependencies.scala index d3183886c..13e221d46 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -173,7 +173,7 @@ object Dependencies { val Ftp = Seq( 
libraryDependencies ++= Seq( "commons-net" % "commons-net" % "3.8.0", - "com.hierynomus" % "sshj" % "0.33.0")) + "com.hierynomus" % "sshj" % "0.33.0") ++ Mockito) val GeodeVersion = "1.15.0" val GeodeVersionForDocs = "115" From c90382c5c2b69509e421ebd88b8a61558a1227c4 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 25 Aug 2023 17:47:56 +0200 Subject: [PATCH 48/90] Add FTP connector setting TrustManager/KeyManager in release notes --- docs/src/main/paradox/release-notes/index.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md index e820e18df..db2b45bb3 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -38,6 +38,7 @@ We haven't had to fix many bugs that were in Alpakka 4.0.0. * Scala 3 support ([#126](https://github.com/apache/incubator-pekko-connectors/issues/126)) * The connectors that still only support Scala 2 are MongoDB and Slick. * FTP Connector now supports UTF8 Autodetect mode ([PR221](https://github.com/apache/incubator-pekko-connectors/pull/221)) +* FTP Connector now supports setting `TrustManager`/`KeyManager` ([PR205](https://github.com/apache/incubator-pekko-connectors/pull/205)) * IronMQ Connector: changed the Circe JSON integration to use [mdedetrich/pekko-streams-circe](https://github.com/mdedetrich/pekko-streams-circe) ([PR134](https://github.com/apache/incubator-pekko-connectors/pull/134)) * S3 Connector: Add Bucket With Versioning API support ([PR84](https://github.com/apache/incubator-pekko-connectors/pull/84)) From f7b9e3d4ee09d84edfb52581f0fa9705675ea49f Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 25 Aug 2023 17:12:39 +0200 Subject: [PATCH 49/90] Remove Apache snapshot repository --- build.sbt | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/build.sbt b/build.sbt index 30bfcb0d9..fb530ff1f 100644 --- a/build.sbt +++ b/build.sbt @@ -9,10 +9,6 @@ import net.bzzt.reproduciblebuilds.ReproducibleBuildsPlugin.reproducibleBuildsCheckResolver -// TODO: Remove when Pekko has a proper release -ThisBuild / resolvers += Resolver.ApacheMavenSnapshotsRepo -ThisBuild / updateOptions := updateOptions.value.withLatestSnapshots(false) - ThisBuild / apacheSonatypeProjectProfile := "pekko" sourceDistName := "apache-pekko-connectors" sourceDistIncubating := true @@ -351,6 +347,9 @@ lazy val docs = project .enablePlugins(PekkoParadoxPlugin, ParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin) .disablePlugins(MimaPlugin) .settings( + // TODO Remove when pekko-paradox-sbt has its first release + resolvers += Resolver.ApacheMavenSnapshotsRepo, + updateOptions := updateOptions.value.withLatestSnapshots(false), Compile / paradox / name := "Apache Pekko Connectors", publish / skip := true, pekkoParadoxGithub := Some("https://github.com/apache/incubator-pekko-connectors"), From 392adbbe97210a60bfff7b12a6da0462ee90c900 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sat, 3 Jun 2023 10:20:51 +0200 Subject: [PATCH 50/90] Add protected tags to .asf.yaml --- .asf.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.asf.yaml b/.asf.yaml index 9b98ed576..a61d490fd 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -11,7 +11,10 @@ github: - reactive - reactive-streams - messaging - + + protected_tags: + - "v*.*.*" + features: # Enable wiki for documentation wiki: false From e6df411240efbc311ac3b27cb2720ff22a8d2802 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 28 Aug 2023 
09:08:08 +0100 Subject: [PATCH 51/90] remove unnecessary imports (#241) --- .../stream/connectors/elasticsearch/WriteSettingsBase.scala | 4 ---- 1 file changed, 4 deletions(-) diff --git a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala index f519b101b..7acf6fb17 100644 --- a/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala +++ b/elasticsearch/src/main/scala/org/apache/pekko/stream/connectors/elasticsearch/WriteSettingsBase.scala @@ -13,10 +13,6 @@ package org.apache.pekko.stream.connectors.elasticsearch -import org.apache.pekko -import pekko.stream.connectors.elasticsearch.ElasticsearchConnectionSettings -import pekko.stream.connectors.elasticsearch.RetryLogic - /** * Configure Elasticsearch/OpenSearch sinks and flows. */ From 15eb692d210f5234972e537fe0673f10024f1948 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 29 Aug 2023 11:28:45 +0100 Subject: [PATCH 52/90] use sbt paradox release (#242) --- build.sbt | 3 --- project/build.properties | 2 +- project/plugins.sbt | 6 ++---- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/build.sbt b/build.sbt index fb530ff1f..35441e439 100644 --- a/build.sbt +++ b/build.sbt @@ -347,9 +347,6 @@ lazy val docs = project .enablePlugins(PekkoParadoxPlugin, ParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin) .disablePlugins(MimaPlugin) .settings( - // TODO Remove when pekko-paradox-sbt has its first release - resolvers += Resolver.ApacheMavenSnapshotsRepo, - updateOptions := updateOptions.value.withLatestSnapshots(false), Compile / paradox / name := "Apache Pekko Connectors", publish / skip := true, pekkoParadoxGithub := Some("https://github.com/apache/incubator-pekko-connectors"), diff --git a/project/build.properties b/project/build.properties index 52413ab79..304098715 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.9.3 +sbt.version=1.9.4 diff --git a/project/plugins.sbt b/project/plugins.sbt index fe2565adf..994178910 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -18,14 +18,12 @@ addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.9.0") addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6") addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.0") addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.7.0") + // docs -// allow access to snapshots for pekko-sbt-paradox -resolvers += Resolver.ApacheMavenSnapshotsRepo -updateOptions := updateOptions.value.withLatestSnapshots(false) // We have to deliberately use older versions of sbt-paradox because current Pekko sbt build // only loads on JDK 1.8 so we need to bring in older versions of parboiled which support JDK 1.8 -addSbtPlugin(("org.apache.pekko" % "pekko-sbt-paradox" % "0.0.0+56-bff08336-SNAPSHOT").excludeAll( +addSbtPlugin(("org.apache.pekko" % "pekko-sbt-paradox" % "1.0.0").excludeAll( "com.lightbend.paradox", "sbt-paradox", "com.lightbend.paradox" % "sbt-paradox-apidoc", "com.lightbend.paradox" % "sbt-paradox-project-info")) From 37699718278e84a5e8a22ba18b3ec4401056d9c5 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sun, 3 Sep 2023 11:51:56 +0200 Subject: [PATCH 53/90] Remove ALPAKKA from test code --- .../test/java/docs/javadsl/DocSnippetFlow.java | 2 +- .../javadsl/DocSnippetFlowWithPassThrough.java | 2 +- .../test/java/docs/javadsl/DocSnippetSink.java | 2 +- 
.../test/java/docs/javadsl/DocSnippetSource.java | 2 +- slick/src/test/java/docs/javadsl/SlickTest.java | 16 ++++++++-------- .../test/scala/docs/scaladsl/DocSnippets.scala | 12 +++++++----- .../src/test/scala/docs/scaladsl/SlickSpec.scala | 10 +++++----- 7 files changed, 24 insertions(+), 22 deletions(-) diff --git a/slick/src/test/java/docs/javadsl/DocSnippetFlow.java b/slick/src/test/java/docs/javadsl/DocSnippetFlow.java index e6b01570c..06b77807b 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetFlow.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetFlow.java @@ -51,7 +51,7 @@ public static void main(String[] args) throws Exception { (user, connection) -> { PreparedStatement statement = connection.prepareStatement( - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); statement.setInt(1, user.id); statement.setString(2, user.name); return statement; diff --git a/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java b/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java index 7ff2ca87f..131d50ee6 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetFlowWithPassThrough.java @@ -89,7 +89,7 @@ public static void main(String[] args) throws Exception { (kafkaMessage, connection) -> { PreparedStatement statement = connection.prepareStatement( - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); statement.setInt(1, kafkaMessage.msg.id); statement.setString(2, kafkaMessage.msg.name); return statement; diff --git a/slick/src/test/java/docs/javadsl/DocSnippetSink.java b/slick/src/test/java/docs/javadsl/DocSnippetSink.java index c625d3963..aa139bbc5 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetSink.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetSink.java @@ -48,7 +48,7 @@ public static void main(String[] args) throws Exception { (user, connection) -> { PreparedStatement statement = connection.prepareStatement( - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); statement.setInt(1, user.id); statement.setString(2, user.name); return statement; diff --git a/slick/src/test/java/docs/javadsl/DocSnippetSource.java b/slick/src/test/java/docs/javadsl/DocSnippetSource.java index fd1e6575d..28f45d631 100644 --- a/slick/src/test/java/docs/javadsl/DocSnippetSource.java +++ b/slick/src/test/java/docs/javadsl/DocSnippetSource.java @@ -34,7 +34,7 @@ public static void main(String[] args) throws Exception { final CompletionStage done = Slick.source( session, - "SELECT ID, NAME FROM ALPAKKA_SLICK_JAVADSL_TEST_USERS", + "SELECT ID, NAME FROM PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS", (SlickRow row) -> new User(row.nextInt(), row.nextString())) .log("user") .runWith(Sink.ignore(), system); diff --git a/slick/src/test/java/docs/javadsl/SlickTest.java b/slick/src/test/java/docs/javadsl/SlickTest.java index 834362d36..e11e34d05 100644 --- a/slick/src/test/java/docs/javadsl/SlickTest.java +++ b/slick/src/test/java/docs/javadsl/SlickTest.java @@ -71,7 +71,7 @@ public class SlickTest { private static final Function insertUser = (user) -> - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (" + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (" + user.id + ", '" + user.name @@ -81,14 +81,14 @@ public class SlickTest { 
(user, connection) -> { PreparedStatement statement = connection.prepareStatement( - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)"); statement.setInt(1, user.id); statement.setString(2, user.name); return statement; }; private static final String selectAllUsers = - "SELECT ID, NAME FROM ALPAKKA_SLICK_JAVADSL_TEST_USERS"; + "SELECT ID, NAME FROM PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS"; @BeforeClass public static void setup() { @@ -97,19 +97,19 @@ public static void setup() { // #init-mat executeStatement( - "CREATE TABLE ALPAKKA_SLICK_JAVADSL_TEST_USERS(ID INTEGER, NAME VARCHAR(50))", + "CREATE TABLE PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS(ID INTEGER, NAME VARCHAR(50))", session, system); } @After public void cleanUp() { - executeStatement("DELETE FROM ALPAKKA_SLICK_JAVADSL_TEST_USERS", session, system); + executeStatement("DELETE FROM PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS", session, system); } @AfterClass public static void teardown() { - executeStatement("DROP TABLE ALPAKKA_SLICK_JAVADSL_TEST_USERS", session, system); + executeStatement("DROP TABLE PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS", session, system); // #close-session system.registerOnTermination(session::close); @@ -125,7 +125,7 @@ public void testSinkPSThatThrowException() { session, (__, connection) -> connection.prepareStatement( - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)")); + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)")); assertThrows( ExecutionException.class, () -> @@ -171,7 +171,7 @@ public void testFlowPSWithRecover() throws Exception { session, (__, connection) -> connection.prepareStatement( - "INSERT INTO ALPAKKA_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)")) + "INSERT INTO PEKKO_CONNECTORS_SLICK_JAVADSL_TEST_USERS VALUES (?, ?)")) .recoverWithRetries(1, SQLException.class, () -> Source.single(-1)); final List insertionResult = usersSource diff --git a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala index fe20fd3fd..ca72c4432 100644 --- a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala +++ b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala @@ -52,7 +52,7 @@ object SlickSourceWithPlainSQLQueryExample extends App { // Stream the results of a query val done: Future[Done] = Slick - .source(sql"SELECT ID, NAME FROM ALPAKKA_SLICK_SCALADSL_TEST_USERS".as[User]) + .source(sql"SELECT ID, NAME FROM PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS".as[User]) .log("user") .runWith(Sink.ignore) // #source-example @@ -75,7 +75,7 @@ object SlickSourceWithTypedQueryExample extends App { import session.profile.api._ // The example domain - class Users(tag: Tag) extends Table[(Int, String)](tag, "ALPAKKA_SLICK_SCALADSL_TEST_USERS") { + class Users(tag: Tag) extends Table[(Int, String)](tag, "PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS") { def id = column[Int]("ID") def name = column[String]("NAME") def * = (id, name) @@ -117,7 +117,8 @@ object SlickSinkExample extends App { Source(users) .runWith( // add an optional first argument to specify the parallelism factor (Int) - Slick.sink(user => sqlu"INSERT INTO ALPAKKA_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})")) + Slick.sink(user => + sqlu"INSERT INTO PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})")) // #sink-example done.onComplete { @@ -148,7 +149,8 @@ object SlickFlowExample extends App { Source(users) .via( // add an optional first argument 
to specify the parallelism factor (Int) - Slick.flow(user => sqlu"INSERT INTO ALPAKKA_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})")) + Slick.flow(user => + sqlu"INSERT INTO PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})")) .log("nr-of-updated-rows") .runWith(Sink.ignore) // #flow-example @@ -194,7 +196,7 @@ object SlickFlowWithPassThroughExample extends App { // add an optional first argument to specify the parallelism factor (Int) Slick.flowWithPassThrough { kafkaMessage => val user = kafkaMessage.msg - sqlu"INSERT INTO ALPAKKA_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})" + sqlu"INSERT INTO PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})" .map { insertCount => // map db result to something else // allows to keep the kafka message offset so it can be committed in a next stage kafkaMessage.map(user => (user, insertCount)) diff --git a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala index 6563dba1f..9bb526540 100644 --- a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala +++ b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala @@ -53,7 +53,7 @@ class SlickSpec import session.profile.api._ case class User(id: Int, name: String) - class Users(tag: Tag) extends Table[(Int, String)](tag, "ALPAKKA_SLICK_SCALADSL_TEST_USERS") { + class Users(tag: Tag) extends Table[(Int, String)](tag, "PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS") { def id = column[Int]("ID") def name = column[String]("NAME") def * = (id, name) @@ -65,13 +65,13 @@ class SlickSpec val users = (1 to 40).map(i => User(i, s"Name$i")).toSet - val createTable = sqlu"""CREATE TABLE ALPAKKA_SLICK_SCALADSL_TEST_USERS(ID INTEGER, NAME VARCHAR(50))""" - val dropTable = sqlu"""DROP TABLE ALPAKKA_SLICK_SCALADSL_TEST_USERS""" - val selectAllUsers = sql"SELECT ID, NAME FROM ALPAKKA_SLICK_SCALADSL_TEST_USERS".as[User] + val createTable = sqlu"""CREATE TABLE PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS(ID INTEGER, NAME VARCHAR(50))""" + val dropTable = sqlu"""DROP TABLE PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS""" + val selectAllUsers = sql"SELECT ID, NAME FROM PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS".as[User] val typedSelectAllUsers = TableQuery[Users].result def insertUser(user: User): DBIO[Int] = - sqlu"INSERT INTO ALPAKKA_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})" + sqlu"INSERT INTO PEKKO_CONNECTORS_SLICK_SCALADSL_TEST_USERS VALUES(${user.id}, ${user.name})" def getAllUsersFromDb: Future[Set[User]] = Slick.source(selectAllUsers).runWith(Sink.seq).map(_.toSet) def populate() = { From a5c17e63434080caadc7cc377eeb6b3183789cb4 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Mon, 4 Sep 2023 15:56:52 +0200 Subject: [PATCH 54/90] Replace mentions of Alpakka in code --- docker-compose.yml | 12 +++--- .../java/docs/javadsl/IntegrationTest.java | 10 ++--- .../src/test/resources/application.conf | 2 +- .../scala/docs/scaladsl/IntegrationSpec.scala | 14 +++---- .../src/test/resources/application.conf | 2 +- .../impl/GCStorageStreamIntegrationSpec.scala | 4 +- .../scaladsl/GCStorageWiremockBase.scala | 38 +++++++++---------- .../java/docs/javadsl/HdfsReaderTest.java | 6 +-- .../java/docs/javadsl/HdfsWriterTest.java | 2 +- .../scala/docs/scaladsl/HdfsReaderSpec.scala | 6 +-- .../scala/docs/scaladsl/HdfsWriterSpec.scala | 2 +- reference/src/main/resources/application.conf | 2 +- s3/src/main/resources/reference.conf | 2 +- .../stream/connectors/s3/MinioS3Test.scala | 2 +- 
scripts/link-validator.conf | 2 +- 15 files changed, 53 insertions(+), 53 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index adb6e81e0..ce8e07414 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -133,21 +133,21 @@ services: image: google/cloud-sdk:311.0.0 ports: - "8538:8538" - command: gcloud beta emulators pubsub start --project=alpakka --host-port=0.0.0.0:8538 + command: gcloud beta emulators pubsub start --project=pekko-connectors --host-port=0.0.0.0:8538 gcloud-pubsub-emulator_prep: image: martynas/gcloud-pubsub-client links: - "gcloud-pubsub-emulator" environment: - - "PUBSUB_PROJECT_ID=alpakka" + - "PUBSUB_PROJECT_ID=pekko-connectors" - "PUBSUB_EMULATOR_HOST=gcloud-pubsub-emulator:8538" entrypoint: "" command: > bash -c " - python publisher.py alpakka create simpleTopic && - python subscriber.py alpakka create simpleTopic simpleSubscription - python publisher.py alpakka create testTopic && - python subscriber.py alpakka create testTopic testSubscription + python publisher.py pekko-connectors create simpleTopic && + python subscriber.py pekko-connectors create simpleTopic simpleSubscription + python publisher.py pekko-connectors create testTopic && + python subscriber.py pekko-connectors create testTopic testSubscription " hbase: image: harisekhon/hbase:1.4 diff --git a/google-cloud-pub-sub-grpc/src/test/java/docs/javadsl/IntegrationTest.java b/google-cloud-pub-sub-grpc/src/test/java/docs/javadsl/IntegrationTest.java index 7cdbaa14a..8a172fecd 100644 --- a/google-cloud-pub-sub-grpc/src/test/java/docs/javadsl/IntegrationTest.java +++ b/google-cloud-pub-sub-grpc/src/test/java/docs/javadsl/IntegrationTest.java @@ -55,7 +55,7 @@ public class IntegrationTest { public void shouldPublishAMessage() throws InterruptedException, ExecutionException, TimeoutException { // #publish-single - final String projectId = "alpakka"; + final String projectId = "pekko-connectors"; final String topic = "simpleTopic"; final PubsubMessage publishMessage = @@ -84,7 +84,7 @@ public void shouldPublishAMessage() public void shouldPublishBatch() throws InterruptedException, ExecutionException, TimeoutException { // #publish-fast - final String projectId = "alpakka"; + final String projectId = "pekko-connectors"; final String topic = "simpleTopic"; final PubsubMessage publishMessage = @@ -113,7 +113,7 @@ public void shouldPublishBatch() public void shouldSubscribeStream() throws InterruptedException, ExecutionException, TimeoutException { // #subscribe-stream - final String projectId = "alpakka"; + final String projectId = "pekko-connectors"; final String subscription = "simpleSubscription"; final StreamingPullRequest request = @@ -152,7 +152,7 @@ public void shouldSubscribeStream() public void shouldSubscribeSync() throws InterruptedException, ExecutionException, TimeoutException { // #subscribe-sync - final String projectId = "alpakka"; + final String projectId = "pekko-connectors"; final String subscription = "simpleSubscription"; final PullRequest request = @@ -189,7 +189,7 @@ public void shouldSubscribeSync() @Test public void shouldAcknowledge() { - final String projectId = "alpakka"; + final String projectId = "pekko-connectors"; final String subscription = "simpleSubscription"; final StreamingPullRequest request = diff --git a/google-cloud-pub-sub-grpc/src/test/resources/application.conf b/google-cloud-pub-sub-grpc/src/test/resources/application.conf index 8cc5f3a21..135619523 100644 --- a/google-cloud-pub-sub-grpc/src/test/resources/application.conf +++ 
b/google-cloud-pub-sub-grpc/src/test/resources/application.conf @@ -11,7 +11,7 @@ pekko.connectors.google.cloud.pubsub.grpc { # * go to the console at https://console.cloud.google.com # * Create a compute engine service account as documented at https://cloud.google.com/docs/authentication/production#creating_a_service_account # * Point GOOGLE_APPLICATION_CREDENTIALS to the downloaded JSON key and start sbt - # * Create a project, and update IntegrationSpec to use that project ID rather than "alpakka" + # * Create a project, and update IntegrationSpec to use that project ID rather than "pekko-connectors" # * Under 'Pub/Sub', 'Topics' create a topic 'simpleTopic' with a Google-managed key # * Under 'Pub/Sub', 'Subscriptions' create a subscription 'simpleSubscription' for this topic # * For 'republish', also create 'testTopic' and 'testSubscription' diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala index fcea88765..3e088cecc 100644 --- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala +++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala @@ -58,7 +58,7 @@ class IntegrationSpec "publish a message" in { // #publish-single - val projectId = "alpakka" + val projectId = "pekko-connectors" val topic = "simpleTopic" val publishMessage: PubsubMessage = @@ -84,7 +84,7 @@ class IntegrationSpec "publish batch" in { // #publish-fast - val projectId = "alpakka" + val projectId = "pekko-connectors" val topic = "simpleTopic" val publishMessage: PubsubMessage = @@ -108,7 +108,7 @@ class IntegrationSpec "subscribe streaming" in { // #subscribe-stream - val projectId = "alpakka" + val projectId = "pekko-connectors" val subscription = "simpleSubscription" val request = StreamingPullRequest() @@ -139,7 +139,7 @@ class IntegrationSpec "subscribe sync" in { // #subscribe-sync - val projectId = "alpakka" + val projectId = "pekko-connectors" val subscription = "simpleSubscription" val request = PullRequest() @@ -169,7 +169,7 @@ class IntegrationSpec } "acknowledge" in { - val projectId = "alpakka" + val projectId = "pekko-connectors" val subscription = "simpleSubscription" val request = StreamingPullRequest() @@ -199,7 +199,7 @@ class IntegrationSpec } "acknowledge flow" in { - val projectId = "alpakka" + val projectId = "pekko-connectors" val subscription = "simpleSubscription" val request = StreamingPullRequest() @@ -223,7 +223,7 @@ class IntegrationSpec "republish" in { val msg = "Labas!" 
- val projectId = "alpakka" + val projectId = "pekko-connectors" val topic = "testTopic" val subscription = "testSubscription" diff --git a/google-cloud-pub-sub/src/test/resources/application.conf b/google-cloud-pub-sub/src/test/resources/application.conf index fa52bb687..0bed42329 100644 --- a/google-cloud-pub-sub/src/test/resources/application.conf +++ b/google-cloud-pub-sub/src/test/resources/application.conf @@ -28,7 +28,7 @@ pekko.connectors.google { credentials { provider = none none { - project-id = "alpakka" + project-id = "pekko-connectors" token = "ya29.Elz4A2XkfGKJ4CoS5x_umUBHsvjGdeWQzu6gRRCnNXI0fuIyoDP_6aYktBQEOI4YAhLNgUl2OpxWQaN8Z3hd5YfFw1y4EGAtr2o28vSID-c8ul_xxHuudE7RmhH9sg" } } diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala index d891f8241..ab1610323 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStreamIntegrationSpec.scala @@ -42,7 +42,7 @@ import scala.concurrent.Future * storage object admin * storage admin (to run the create/delete bucket test) * - modify test/resources/application.conf - * - create a `alpakka` bucket for testing + * - create a `pekko-connectors` bucket for testing * - create a rewrite `pekko-connectors-rewrite` bucket for testing */ class GCStorageStreamIntegrationSpec @@ -80,7 +80,7 @@ class GCStorageStreamIntegrationSpec "GCStorageStream" should { "be able to create and delete a bucket" ignore { - val randomBucketName = s"alpakka_${UUID.randomUUID().toString}" + val randomBucketName = s"pekko-connectors_${UUID.randomUUID().toString}" val res = for { bucket <- GCStorageStream.createBucketSource(randomBucketName, "europe-west1").runWith(Sink.head) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala index 590552368..7eb852643 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala @@ -60,13 +60,13 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "size":"5", | "generation":"$generation", | "timeCreated":"2018-11-24T10:24:13.992Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka/o/$fileName", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors/o/$fileName", | "timeStorageClassUpdated":"2018-11-24T10:24:13.992Z", | "storageClass":"MULTI_REGIONAL", - | "id":"alpakka/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", + | "id":"pekko-connectors/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", | "contentType":"text/plain; charset=UTF-8", | "updated":"2018-11-24T10:24:13.992Z", - | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/alpakka/o/$fileName?generation=1543055053992768&alt=media", + | 
"mediaLink":"https://www.googleapis.com/download/storage/v1/b/pekko-connectors/o/$fileName?generation=1543055053992768&alt=media", | "bucket":"connectors", | "kind":"storage#object", | "metageneration":"1", @@ -124,9 +124,9 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "name":"$bucketName", | "location":"EUROPE-WEST1", | "timeCreated":"2018-11-24T06:51:56.529Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", | "storageClass":"STANDARD", - | "id":"alpakka_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", + | "id":"pekko-connectors_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", | "updated":"2018-11-24T06:51:56.529Z", | "projectNumber":"250058024243", | "iamConfiguration":{"bucketPolicyOnly":{"enabled":false}}, @@ -193,9 +193,9 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "name":"$bucketName", | "location":"EUROPE-WEST1", | "timeCreated":"2018-11-24T06:51:56.529Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", | "storageClass":"STANDARD", - | "id":"alpakka_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", + | "id":"pekko-connectors_7fb9d77f-d327-42db-b0d6-538db2a1a3ae", | "updated":"2018-11-24T06:51:56.529Z", | "projectNumber":"250058024243", | "iamConfiguration":{"bucketPolicyOnly":{"enabled":false}}, @@ -366,13 +366,13 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "crc32c":"AtvFhg==", | "md5Hash":"emjwm9mSZxuzsZpecLeCfg==", | "timeCreated":"2018-11-24T10:24:13.992Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka/o/$firstFileName", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors/o/$firstFileName", | "timeStorageClassUpdated":"2018-11-24T10:24:13.992Z", | "storageClass":"MULTI_REGIONAL", - | "id":"alpakka/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", + | "id":"pekko-connectors/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", | "contentType":"text/plain; charset=UTF-8", | "updated":"2018-11-24T10:24:13.992Z", - | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/alpakka/o/$firstFileName?generation=1543055053992768&alt=media", + | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/pekko-connectors/o/$firstFileName?generation=1543055053992768&alt=media", | "bucket":"connectors", | "kind":"storage#object", | "metageneration":"1", @@ -395,13 +395,13 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "crc32c":"AtvFhg==", | "md5Hash":"emjwm9mSZxuzsZpecLeCfg==", | "timeCreated":"2018-11-24T10:24:13.992Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka/o/$firstFileName", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors/o/$firstFileName", | "timeStorageClassUpdated":"2018-11-24T10:24:13.992Z", | "storageClass":"MULTI_REGIONAL", - | "id":"alpakka/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", + | "id":"pekko-connectors/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", | "contentType":"text/plain; charset=UTF-8", | 
"updated":"2018-11-24T10:24:13.992Z", - | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/alpakka/o/$firstFileName?generation=1543055053992768&alt=media", + | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/pekko-connectors/o/$firstFileName?generation=1543055053992768&alt=media", | "bucket":"connectors", | "kind":"storage#object", | "metageneration":"1", @@ -436,13 +436,13 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "crc32c":"AtvFhg==", | "md5Hash":"emjwm9mSZxuzsZpecLeCfg==", | "timeCreated":"2018-11-24T10:24:13.992Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka/o/$secondFileName", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors/o/$secondFileName", | "timeStorageClassUpdated":"2018-11-24T10:24:13.992Z", | "storageClass":"MULTI_REGIONAL", - | "id":"alpakka/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", + | "id":"pekko-connectors/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", | "contentType":"text/plain; charset=UTF-8", | "updated":"2018-11-24T10:24:13.992Z", - | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/alpakka/o/$secondFileName?generation=1543055053992768&alt=media", + | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/pekko-connectors/o/$secondFileName?generation=1543055053992768&alt=media", | "bucket":"connectors", | "kind":"storage#object", | "metageneration":"1", @@ -732,13 +732,13 @@ abstract class GCStorageWiremockBase(_system: ActorSystem, _wireMockServer: Hove | "crc32c":"AtvFhg==", | "md5Hash":"emjwm9mSZxuzsZpecLeCfg==", | "timeCreated":"2018-11-24T10:24:13.992Z", - | "selfLink":"https://www.googleapis.com/storage/v1/b/alpakka/o/$fileName", + | "selfLink":"https://www.googleapis.com/storage/v1/b/pekko-connectors/o/$fileName", | "timeStorageClassUpdated":"2018-11-24T10:24:13.992Z", | "storageClass":"MULTI_REGIONAL", - | "id":"alpakka/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", + | "id":"pekko-connectors/GoogleCloudStorageClientIntegrationSpec63f9feb6-e800-472b-a51f-48e1c6d5d43f/testa.txt/1543055053992768", | "contentType":"text/plain; charset=UTF-8", | "updated":"2018-11-24T10:24:13.992Z", - | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/alpakka/o/$fileName?generation=1543055053992768&alt=media", + | "mediaLink":"https://www.googleapis.com/download/storage/v1/b/pekko-connectors/o/$fileName?generation=1543055053992768&alt=media", | "bucket":"$rewriteBucketName", | "kind":"storage#object", | "metageneration":"1", diff --git a/hdfs/src/test/java/docs/javadsl/HdfsReaderTest.java b/hdfs/src/test/java/docs/javadsl/HdfsReaderTest.java index bb6530a06..0588e2a2d 100644 --- a/hdfs/src/test/java/docs/javadsl/HdfsReaderTest.java +++ b/hdfs/src/test/java/docs/javadsl/HdfsReaderTest.java @@ -67,7 +67,7 @@ public void testReadDataFile() throws Exception { List readData = new ArrayList<>(); for (RotationMessage log : logs) { - Path path = new Path("/tmp/alpakka", log.path()); + Path path = new Path("/tmp/pekko-connectors", log.path()); // #define-data-source Source> source = HdfsSource.data(fs, path); // #define-data-source @@ -103,7 +103,7 @@ public void testCompressedDataFile() throws Exception { List readData = new ArrayList<>(); for (RotationMessage log : logs) { - Path path = new Path("/tmp/alpakka", log.path()); + Path path = new 
Path("/tmp/pekko-connectors", log.path()); // #define-compressed-source Source> source = HdfsSource.compressed(fs, path, codec); // #define-compressed-source @@ -141,7 +141,7 @@ public void testReadSequenceFile() throws Exception { List> readData = new ArrayList<>(); for (RotationMessage log : logs) { - Path path = new Path("/tmp/alpakka", log.path()); + Path path = new Path("/tmp/pekko-connectors", log.path()); // #define-sequence-source Source, NotUsed> source = HdfsSource.sequence(fs, path, Text.class, Text.class); diff --git a/hdfs/src/test/java/docs/javadsl/HdfsWriterTest.java b/hdfs/src/test/java/docs/javadsl/HdfsWriterTest.java index e845003fd..0a0af536a 100644 --- a/hdfs/src/test/java/docs/javadsl/HdfsWriterTest.java +++ b/hdfs/src/test/java/docs/javadsl/HdfsWriterTest.java @@ -181,7 +181,7 @@ public void testDataWriterWithTimeRotation() throws Exception { Source.tick( java.time.Duration.ofMillis(0), java.time.Duration.ofMillis(50), - ByteString.fromString("I love Alpakka!")) + ByteString.fromString("I love Pekko Connectors!")) .map(HdfsWriteMessage::create) .via( HdfsFlow.data( diff --git a/hdfs/src/test/scala/docs/scaladsl/HdfsReaderSpec.scala b/hdfs/src/test/scala/docs/scaladsl/HdfsReaderSpec.scala index 67d861080..101eefde7 100644 --- a/hdfs/src/test/scala/docs/scaladsl/HdfsReaderSpec.scala +++ b/hdfs/src/test/scala/docs/scaladsl/HdfsReaderSpec.scala @@ -72,7 +72,7 @@ class HdfsReaderSpec Future .sequence( logs.map { log => - val path = new Path("/tmp/alpakka", log.path) + val path = new Path("/tmp/pekko-connectors", log.path) // #define-data-source val source = HdfsSource.data(fs, path) // #define-data-source @@ -108,7 +108,7 @@ class HdfsReaderSpec Future .sequence( logs.map { log => - val path = new Path("/tmp/alpakka", log.path) + val path = new Path("/tmp/pekko-connectors", log.path) // #define-compressed-source val source = HdfsSource.compressed(fs, path, codec) // #define-compressed-source @@ -142,7 +142,7 @@ class HdfsReaderSpec Future .sequence( logs.map { log => - val path = new Path("/tmp/alpakka", log.path) + val path = new Path("/tmp/pekko-connectors", log.path) // #define-sequence-source val source = HdfsSource.sequence(fs, path, classOf[Text], classOf[Text]) // #define-sequence-source diff --git a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala index 3587f53e4..1bfbee788 100644 --- a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala +++ b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala @@ -190,7 +190,7 @@ class HdfsWriterSpec "use time rotation" in { val (cancellable, resF) = Source - .tick(0.millis, 50.milliseconds, ByteString("I love Alpakka!")) + .tick(0.millis, 50.milliseconds, ByteString("I love Pekko Connectors!")) .map(HdfsWriteMessage(_)) .via( HdfsFlow.data( diff --git a/reference/src/main/resources/application.conf b/reference/src/main/resources/application.conf index f7ef58c2f..0e12252c3 100644 --- a/reference/src/main/resources/application.conf +++ b/reference/src/main/resources/application.conf @@ -1,6 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 -# use 'alpakka' namespace for connector settings +# use 'pekko-connectors' namespace for connector settings pekko.connectors.reference { # give brief summary about the setting diff --git a/s3/src/main/resources/reference.conf b/s3/src/main/resources/reference.conf index 8360747f1..d24cd82ee 100644 --- a/s3/src/main/resources/reference.conf +++ b/s3/src/main/resources/reference.conf @@ -88,7 +88,7 @@ pekko.connectors.s3 { # Custom 
endpoint url, used for alternate s3 implementations # To enable virtual-host-style access with Apache Pekko Connectors S3 use the placeholder `{bucket}` in the URL - # eg. endpoint-url = "http://{bucket}.s3minio.alpakka:9000" + # eg. endpoint-url = "http://{bucket}.s3minio.pekko-connectors:9000" # # endpoint-url = null diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioS3Test.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioS3Test.scala index caaa5fccd..82353e12a 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioS3Test.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/MinioS3Test.scala @@ -21,7 +21,7 @@ import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCred trait MinioS3Test extends ForAllTestContainer with TestKitBase { self: Suite => val S3DummyAccessKey = "TESTKEY" val S3DummySecretKey = "TESTSECRET" - val S3DummyDomain = "s3minio.alpakka" + val S3DummyDomain = "s3minio.pekko-connectors" override lazy val container: MinioContainer = new MinioContainer(S3DummyAccessKey, S3DummySecretKey, S3DummyDomain) diff --git a/scripts/link-validator.conf b/scripts/link-validator.conf index 0c3a677dc..c1502c4a9 100644 --- a/scripts/link-validator.conf +++ b/scripts/link-validator.conf @@ -17,7 +17,7 @@ site-link-validator { ] ignore-missing-local-files-regex = - "^api/alpakka/snapshot/akka/stream/connectors/googlecloud/storage/impl/Formats.*" + "^api/pekko-connectors/snapshot/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.*" ignore-prefixes = [ # Fails after a number of requests with "403 Forbidden" From 69b789086f82f17a18c24fa6ccaefe23e4b55414 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 4 Sep 2023 17:29:31 +0100 Subject: [PATCH 55/90] use test token in tests (#245) * use test token in tests * format --- google-cloud-pub-sub/src/test/resources/application.conf | 2 +- .../connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala | 3 +-- .../src/test/scala/docs/scaladsl/GCStorageSourceSpec.scala | 3 +-- .../googlecloud/storage/scaladsl/GCStorageWiremockBase.scala | 3 +-- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/google-cloud-pub-sub/src/test/resources/application.conf b/google-cloud-pub-sub/src/test/resources/application.conf index 0bed42329..81cf6ad95 100644 --- a/google-cloud-pub-sub/src/test/resources/application.conf +++ b/google-cloud-pub-sub/src/test/resources/application.conf @@ -29,7 +29,7 @@ pekko.connectors.google { provider = none none { project-id = "pekko-connectors" - token = "ya29.Elz4A2XkfGKJ4CoS5x_umUBHsvjGdeWQzu6gRRCnNXI0fuIyoDP_6aYktBQEOI4YAhLNgUl2OpxWQaN8Z3hd5YfFw1y4EGAtr2o28vSID-c8ul_xxHuudE7RmhH9sg" + token = "TESTTOKEN" } } } diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala index 6228752a1..f443719d1 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala @@ -95,8 +95,7 @@ class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures val config = PubSubConfig() - val accessToken = - "ya29.Elz4A2XkfGKJ4CoS5x_umUBHsvjGdeWQzu6gRRCnNXI0fuIyoDP_6aYktBQEOI4YAhLNgUl2OpxWQaN8Z3hd5YfFw1y4EGAtr2o28vSID-c8ul_xxHuudE7RmhH9sg" + val 
accessToken = "TESTTOKEN" it should "publish" in { diff --git a/google-cloud-storage/src/test/scala/docs/scaladsl/GCStorageSourceSpec.scala b/google-cloud-storage/src/test/scala/docs/scaladsl/GCStorageSourceSpec.scala index 026ac42e7..4d4bee4e4 100644 --- a/google-cloud-storage/src/test/scala/docs/scaladsl/GCStorageSourceSpec.scala +++ b/google-cloud-storage/src/test/scala/docs/scaladsl/GCStorageSourceSpec.scala @@ -573,6 +573,5 @@ class GCStorageSourceSpec } object TestCredentials { - val accessToken = - "ya29.Elz4A2XkfGKJ4CoS5x_umUBHsvjGdeWQzu6gRRCnNXI0fuIyoDP_6aYktBQEOI4YAhLNgUl2OpxWQaN8Z3hd5YfFw1y4EGAtr2o28vSID-c8ul_xxHuudE7RmhH9sg" + val accessToken = "TESTTOKEN" } diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala index 7eb852643..35f18241c 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/scaladsl/GCStorageWiremockBase.scala @@ -889,6 +889,5 @@ object GCStorageWiremockBase { } object TestCredentials { - val accessToken = - "ya29.Elz4A2XkfGKJ4CoS5x_umUBHsvjGdeWQzu6gRRCnNXI0fuIyoDP_6aYktBQEOI4YAhLNgUl2OpxWQaN8Z3hd5YfFw1y4EGAtr2o28vSID-c8ul_xxHuudE7RmhH9sg" + val accessToken = "TESTTOKEN" } From 83a23dbbc6953e62dbd847b1dfb3ca87a0e72f96 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Tue, 5 Sep 2023 10:46:44 +0200 Subject: [PATCH 56/90] Use fetch tags from latest checkout action --- .github/workflows/check-build-test.yml | 30 +++++++----------- .github/workflows/dependency-graph.yml | 2 +- .github/workflows/format.yml | 2 +- .github/workflows/headers.yml | 5 ++- .../workflows/license-and-vulnerabilities.yml | 31 ------------------- .github/workflows/link-validator.yml | 10 +++--- .github/workflows/publish-1.0-docs.yml | 4 +-- .github/workflows/publish-nightly.yml | 4 +-- 8 files changed, 26 insertions(+), 62 deletions(-) delete mode 100644 .github/workflows/license-and-vulnerabilities.yml diff --git a/.github/workflows/check-build-test.yml b/.github/workflows/check-build-test.yml index 52c5fd767..08439667a 100644 --- a/.github/workflows/check-build-test.yml +++ b/.github/workflows/check-build-test.yml @@ -23,13 +23,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 - # temporarily do full checkout - # with: # https://github.com/olafurpg/setup-scala#faster-checkout-of-big-repos - # fetch-depth: 100 - - #- name: Fetch tags - # run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* + uses: actions/checkout@v4 + with: + fetch-tags: true + fetch-depth: 0 - name: Setup Java 8 uses: actions/setup-java@v3 @@ -52,13 +49,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 - # temporarily do full checkout - # with: # https://github.com/olafurpg/setup-scala#faster-checkout-of-big-repos - # fetch-depth: 100 - - #- name: Fetch tags - # run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* + uses: actions/checkout@v4 + with: + fetch-tags: true + fetch-depth: 0 - name: Setup Java 11 uses: actions/setup-java@v3 @@ -137,10 +131,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 - # temporarily do full checkout - # with: # gh-detect-changes.sh compares with the target branch - # fetch-depth: 0 + uses: actions/checkout@v4 + with: + fetch-tags: true + 
fetch-depth: 0 - name: Setup Java 8 uses: actions/setup-java@v3 diff --git a/.github/workflows/dependency-graph.yml b/.github/workflows/dependency-graph.yml index f8facc045..e96c3efbc 100644 --- a/.github/workflows/dependency-graph.yml +++ b/.github/workflows/dependency-graph.yml @@ -8,5 +8,5 @@ jobs: name: Update Dependency Graph runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: scalacenter/sbt-dependency-submission@v2 diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index d90635f52..2150690a6 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout current branch (full) - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 persist-credentials: false diff --git a/.github/workflows/headers.yml b/.github/workflows/headers.yml index 6eda4f3a3..e040d34cb 100644 --- a/.github/workflows/headers.yml +++ b/.github/workflows/headers.yml @@ -11,7 +11,10 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-tags: true + fetch-depth: 0 - name: Setup Java 8 uses: actions/setup-java@v3 diff --git a/.github/workflows/license-and-vulnerabilities.yml b/.github/workflows/license-and-vulnerabilities.yml deleted file mode 100644 index e1d096aa5..000000000 --- a/.github/workflows/license-and-vulnerabilities.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: License and vulnerabilty scanning - -on: - push: - branches: - - master - - main - tags-ignore: - - v* - -jobs: - license-and-vulnerabilities-scanning: - name: Analyse - if: github.repository == 'apache/incubator-pekko-connectors' - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v3 - with: # https://github.com/olafurpg/setup-scala#faster-checkout-of-big-repos - fetch-depth: 100 - - name: Fetch tags - run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* - #- name: FOSSA policy check - # if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == 'apache/incubator-pekko-connectors' }} - # run: |- - # curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/spectrometer/master/install.sh | bash - # fossa analyze && fossa test - # env: - # # The FOSSA_API_KEY is configured on Akka organisation level - # FOSSA_API_KEY: "${{secrets.FOSSA_API_KEY}}" diff --git a/.github/workflows/link-validator.yml b/.github/workflows/link-validator.yml index 2599456bf..4bf4e7230 100644 --- a/.github/workflows/link-validator.yml +++ b/.github/workflows/link-validator.yml @@ -10,12 +10,10 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v3 - with: # https://github.com/olafurpg/setup-scala#faster-checkout-of-big-repos - fetch-depth: 100 - - - name: Fetch tags - run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* + uses: actions/checkout@v4 + with: + fetch-tags: true + fetch-depth: 0 - name: Setup Java 11 uses: actions/setup-java@v3 diff --git a/.github/workflows/publish-1.0-docs.yml b/.github/workflows/publish-1.0-docs.yml index 583304679..c4b449302 100644 --- a/.github/workflows/publish-1.0-docs.yml +++ b/.github/workflows/publish-1.0-docs.yml @@ -13,9 +13,9 @@ jobs: JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - # we don't know what commit the last tag was it's safer to get entire repo so 
previousStableVersion resolves + fetch-tags: true fetch-depth: 0 - name: Setup Java 8 diff --git a/.github/workflows/publish-nightly.yml b/.github/workflows/publish-nightly.yml index 23b6072e5..a7f0a2dc3 100644 --- a/.github/workflows/publish-nightly.yml +++ b/.github/workflows/publish-nightly.yml @@ -15,9 +15,9 @@ jobs: JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves + fetch-tags: true fetch-depth: 0 - name: Setup Java 8 From 2267a8bc1be579fba722b3f9830a019a2584e2fe Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Wed, 6 Sep 2023 23:54:31 +0200 Subject: [PATCH 57/90] Fix typo in checkCodeStyle --- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index 35441e439..84a688fa2 100644 --- a/build.sbt +++ b/build.sbt @@ -125,7 +125,7 @@ lazy val `pekko-connectors` = project ) addCommandAlias("applyCodeStyle", ";scalafmtAll; scalafmtSbt; javafmtAll; +headerCreateAll") -addCommandAlias("checkCodeStyle", "+headerCheckAll; ;scalafmtCheckAll; scalafmtSbtCheck; javafmtCheckAll") +addCommandAlias("checkCodeStyle", ";+headerCheckAll; scalafmtCheckAll; scalafmtSbtCheck; javafmtCheckAll") lazy val amqp = pekkoConnectorProject("amqp", "amqp", Dependencies.Amqp) From 141052cecfac4702980955792b71260ad8a944b3 Mon Sep 17 00:00:00 2001 From: kerr Date: Thu, 7 Sep 2023 14:10:59 +0800 Subject: [PATCH 58/90] =sbt Update sbt-mima-plugin to 1.1.3 --- project/plugins.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index 994178910..e30fc1c26 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -16,7 +16,7 @@ addSbtPlugin("com.github.sbt" % "sbt-license-report" % "1.6.1") // discipline addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.9.0") addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6") -addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.0") +addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.3") addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.7.0") // docs From 984f11c8b2f54700179011de89f7a5e89f83cb3d Mon Sep 17 00:00:00 2001 From: kerr Date: Thu, 7 Sep 2023 15:24:35 +0800 Subject: [PATCH 59/90] =sbt Update Scala to 3.3.1 --- project/Dependencies.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 13e221d46..1919cc59e 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -17,7 +17,7 @@ object Dependencies { val Scala213 = "2.13.10" // update even in link-validator.conf val Scala212 = "2.12.17" - val Scala3 = "3.3.0" + val Scala3 = "3.3.1" val ScalaVersions = Seq(Scala213, Scala212, Scala3) val PekkoVersion = "1.0.1" From 92c0f9488d682f77dd2dc11d90576c44d8fb5455 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Tue, 6 Jun 2023 19:11:25 +0200 Subject: [PATCH 60/90] Update Scala 2 versions --- project/Dependencies.scala | 4 ++-- scripts/link-validator.conf | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 1919cc59e..4d443d62e 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -15,8 +15,8 @@ object Dependencies { val CronBuild = sys.env.get("GITHUB_EVENT_NAME").contains("schedule") - val Scala213 = "2.13.10" // update 
even in link-validator.conf - val Scala212 = "2.12.17" + val Scala213 = "2.13.12" // update even in link-validator.conf + val Scala212 = "2.12.18" val Scala3 = "3.3.1" val ScalaVersions = Seq(Scala213, Scala212, Scala3) diff --git a/scripts/link-validator.conf b/scripts/link-validator.conf index c1502c4a9..37dac1b74 100644 --- a/scripts/link-validator.conf +++ b/scripts/link-validator.conf @@ -39,7 +39,7 @@ site-link-validator { "http://www.thedevpiece.com/" # genereated by @apidoc "http://pravega.io/" - "http://www.scala-lang.org/api/2.13.10/scala/concurrent/Future.html" - "http://www.scala-lang.org/api/2.13.10/scala/util/Try.html" + "http://www.scala-lang.org/api/2.13.12/scala/concurrent/Future.html" + "http://www.scala-lang.org/api/2.13.12/scala/util/Try.html" ] } From 87dca1fdf2a8e59340c25e330682f2d63e11af7f Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Thu, 14 Sep 2023 09:23:38 +0200 Subject: [PATCH 61/90] Update to SBT 1.9.5 and use Apache Staging repo constant --- build.sbt | 3 +-- project/build.properties | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/build.sbt b/build.sbt index 84a688fa2..454f1533d 100644 --- a/build.sbt +++ b/build.sbt @@ -19,8 +19,7 @@ commands := commands.value.filterNot { command => } } -ThisBuild / reproducibleBuildsCheckResolver := - "Apache Pekko Staging".at("https://repository.apache.org/content/groups/staging/") +ThisBuild / reproducibleBuildsCheckResolver := Resolver.ApacheMavenStagingRepo lazy val `pekko-connectors` = project .in(file(".")) diff --git a/project/build.properties b/project/build.properties index 304098715..51b51fce6 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.9.4 +sbt.version=1.9.5 From 288c51572827963783e0aa1ea2dc9653e78d9724 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 15 Sep 2023 21:08:49 +0200 Subject: [PATCH 62/90] Update SBT to 1.9.6 --- project/build.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/build.properties b/project/build.properties index 51b51fce6..27430827b 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.9.5 +sbt.version=1.9.6 From 529ed8d69fdd12792895c7f18130316711240ada Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Wed, 20 Sep 2023 09:08:48 +0200 Subject: [PATCH 63/90] Fix apiURL for autoAPIMappings --- docs/src/main/paradox/release-notes/index.md | 7 +++++++ project/Common.scala | 6 ++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md index db2b45bb3..352dab15f 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -2,6 +2,13 @@ To understand the forces on version numbers, read about @ref:[Apache Pekko Connectors' versioning scheme](../other-docs/versioning.md). +## 1.0.1 + +### Bug Fixes + +* Fix `apiURL` so that projects depending on pekko-connectors have the correct +url in their scaladocs via sbt's [autoAPIMappings](https://www.scala-sbt.org/1.x/docs/Howto-Scaladoc.html#Define+the+location+of+API+documentation+for+a+library) feature ([PR252](https://github.com/apache/incubator-pekko-connectors/pull/252)) + ## 1.0.0 Apache Pekko Connectors 1.0.0 is based on Alpakka 4.0.0. 
Pekko came about as a result of Lightbend's decision to make future diff --git a/project/Common.scala b/project/Common.scala index adaa1f49b..b71dcfcc1 100644 --- a/project/Common.scala +++ b/project/Common.scala @@ -111,8 +111,10 @@ object Common extends AutoPlugin { case _ => Seq.empty[String] }), autoAPIMappings := true, - apiURL := Some(url( - s"https://pekko.apache.org/api/pekko-connectors/${version.value}/org/apache/pekko/stream/connectors/index.html")), + apiURL := { + val apiVersion = if (isSnapshot.value) "current" else version.value + Some(url(s"https://pekko.apache.org/api/pekko-connectors/$apiVersion/")) + }, // show full stack traces and test case durations Test / testOptions += Tests.Argument("-oDF"), // -a Show stack traces and exception class name for AssertionErrors. From 89e744bb95ce0125b8d0c3a5208198bf18c50e2d Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Fri, 29 Sep 2023 10:58:46 +0100 Subject: [PATCH 64/90] some lightbend branding (#254) --- CONTRIBUTING.md | 1 - docs/src/main/paradox/pravega.md | 2 +- docs/src/main/paradox/spring-web.md | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bdbdd52d6..c572ea51b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -102,5 +102,4 @@ git config blame.ignoreRevsFile .git-blame-ignore-revs 1. [GitHub actions](https://github.com/apache/incubator-pekko-connectors/actions) automatically merge the code, builds it, runs the tests and sets Pull Request status accordingly of results in GitHub. 1. [Scalafmt](http://scalameta.org/scalafmt/) enforces some of the code style rules. 1. [sbt-header plugin](https://github.com/sbt/sbt-header) manages consistent copyright headers in every source file. -1. A GitHub bot checks whether you've signed the Lightbend CLA. 1. Enabling `fatalWarnings := true` for all projects. diff --git a/docs/src/main/paradox/pravega.md b/docs/src/main/paradox/pravega.md index 51bb08acc..c0459a25a 100644 --- a/docs/src/main/paradox/pravega.md +++ b/docs/src/main/paradox/pravega.md @@ -53,7 +53,7 @@ Two categories of properties can/must be provided to configure the connector. reference.conf : @@snip(/pravega/src/main/resources/reference.conf) -The Pravega connector can automatically configure the Pravega client by supplying Lightbend configuration in an +The Pravega connector can automatically configure the Pravega client by supplying [configuration](https://github.com/lightbend/config) in an application.conf, or it can be set programmatically with @apidoc[ReaderSettingsBuilder$] or @apidoc[WriterSettingsBuilder$]. See the following sections for examples. diff --git a/docs/src/main/paradox/spring-web.md b/docs/src/main/paradox/spring-web.md index 9dcaa628b..fa22f6867 100644 --- a/docs/src/main/paradox/spring-web.md +++ b/docs/src/main/paradox/spring-web.md @@ -1,7 +1,7 @@ # Spring Web -Spring 5.0 introduced compatibility with [Reactive Streams](https://www.reactive-streams.org), a library interoperability standardization effort co-lead by Lightbend (with Apache Pekko Streams) along with Kaazing, Netflix, -Pivotal, Red Hat, Twitter and many others. +Spring 5.0 introduced compatibility with [Reactive Streams](https://www.reactive-streams.org), a library interoperability standardization effort co-lead by Lightbend (with Apache Pekko Streams and Akka Streams) +along with Kaazing, Netflix, Pivotal, Red Hat, Twitter and many others. 
Thanks to adopting Reactive Streams, multiple libraries can now inter-op since the same interfaces are implemented by all these libraries. Apache Pekko Streams by-design, hides the raw reactive-streams types from end-users, since it allows for From ce2b214ee5c13356a2c70df8f1b2255fdb51242f Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 7 Oct 2023 14:34:47 +0100 Subject: [PATCH 65/90] upgrade google-auth dependency (#256) --- project/Dependencies.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 4d443d62e..0dbd617a9 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -53,6 +53,7 @@ object Dependencies { val CouchbaseVersion = "2.7.16" val CouchbaseVersionForDocs = "2.7" + val GoogleAuthVersion = "1.20.0" val JwtCoreVersion = "3.0.1" val log4jOverSlf4jVersion = "1.7.36" @@ -195,7 +196,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5", // ApacheV2 - "com.google.auth" % "google-auth-library-credentials" % "0.24.1", // BSD 3-clause + "com.google.auth" % "google-auth-library-credentials" % GoogleAuthVersion, // BSD 3-clause "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 ) ++ Mockito) @@ -236,7 +237,7 @@ object Dependencies { // https://github.com/googleapis/java-pubsub/tree/master/proto-google-cloud-pubsub-v1/ "com.google.cloud" % "google-cloud-pubsub" % "1.112.5" % "protobuf-src", "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, - "com.google.auth" % "google-auth-library-oauth2-http" % "0.22.2", + "com.google.auth" % "google-auth-library-oauth2-http" % GoogleAuthVersion, "com.google.protobuf" % "protobuf-java" % protobufJavaVersion, // pull in Pekko Discovery for our Pekko version "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) From 837dbe316d028cbef0fb075689156452f1af3c0e Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 8 Oct 2023 19:32:08 +0100 Subject: [PATCH 66/90] upgrade to avro 1.11.3 due to CVE (#259) * upgrade to avro 1.11.3 due to CVE * suppress deprecation warnings * try suppress deprecation warnings * try to suppress deprecation warnings * Update AbstractAvroParquetBase.scala --- .../java/docs/javadsl/AvroParquetSinkTest.java | 1 + .../src/test/java/docs/javadsl/Examples.java | 1 + .../docs/scaladsl/AbstractAvroParquetBase.scala | 17 ++++++++++------- project/Dependencies.scala | 8 +++++--- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/avroparquet/src/test/java/docs/javadsl/AvroParquetSinkTest.java b/avroparquet/src/test/java/docs/javadsl/AvroParquetSinkTest.java index d72820e8b..a6b7b4da9 100644 --- a/avroparquet/src/test/java/docs/javadsl/AvroParquetSinkTest.java +++ b/avroparquet/src/test/java/docs/javadsl/AvroParquetSinkTest.java @@ -76,6 +76,7 @@ public void setup() { records.add(new GenericRecordBuilder(schema).set("id", "3").set("body", "body13").build()); } + @SuppressWarnings("deprecation") @Test public void createNewParquetFile() throws InterruptedException, IOException, TimeoutException, ExecutionException { diff --git a/avroparquet/src/test/java/docs/javadsl/Examples.java b/avroparquet/src/test/java/docs/javadsl/Examples.java index 406631a25..a1a884f5e 100644 --- a/avroparquet/src/test/java/docs/javadsl/Examples.java +++ b/avroparquet/src/test/java/docs/javadsl/Examples.java @@ -58,6 +58,7 @@ public class Examples { Source source 
= AvroParquetSource.create(reader); // #init-source + @SuppressWarnings("deprecation") public Examples() throws IOException { // #init-flow diff --git a/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala b/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala index 97a4052f8..580b271a4 100644 --- a/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala +++ b/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquetBase.scala @@ -22,6 +22,7 @@ import org.apache.parquet.hadoop.util.HadoopInputFile import org.apache.parquet.hadoop.{ ParquetReader, ParquetWriter } import org.scalacheck.Gen +import scala.annotation.nowarn import scala.util.Random trait AbstractAvroParquetBase { @@ -47,6 +48,7 @@ trait AbstractAvroParquetBase { val conf: Configuration = new Configuration() conf.setBoolean(AvroReadSupport.AVRO_COMPATIBILITY, true) + @nowarn("msg=deprecated") def parquetWriter[T <: GenericRecord](file: String, conf: Configuration, schema: Schema): ParquetWriter[T] = AvroParquetWriter.builder[T](new Path(file)).withConf(conf).withSchema(schema).build() @@ -80,6 +82,7 @@ trait AbstractAvroParquetBase { // #prepare-source } + @nowarn("msg=deprecated") def sinkDocumentation(): Unit = { // #prepare-sink import com.sksamuel.avro4s.Record @@ -87,30 +90,30 @@ trait AbstractAvroParquetBase { import org.apache.hadoop.fs.Path import org.apache.parquet.avro.AvroReadSupport - val file: String = "./sample/path/test.parquet" - val conf: Configuration = new Configuration() + val file = "./sample/path/test.parquet" + val conf = new Configuration() conf.setBoolean(AvroReadSupport.AVRO_COMPATIBILITY, true) - val writer: ParquetWriter[Record] = + val writer = AvroParquetWriter.builder[Record](new Path(file)).withConf(conf).withSchema(schema).build() // #prepare-sink if (writer != null) { // forces val usage } } + @nowarn("msg=deprecated") def initWriterDocumentation(): Unit = { // #init-writer import org.apache.avro.generic.GenericRecord import org.apache.hadoop.fs.Path import org.apache.parquet.avro.AvroParquetReader - import org.apache.parquet.hadoop.ParquetReader import org.apache.parquet.hadoop.util.HadoopInputFile - val file: String = "./sample/path/test.parquet" - val writer: ParquetWriter[GenericRecord] = + val file = "./sample/path/test.parquet" + val writer = AvroParquetWriter.builder[GenericRecord](new Path(file)).withConf(conf).withSchema(schema).build() // #init-writer // #init-reader - val reader: ParquetReader[GenericRecord] = + val reader = AvroParquetReader.builder[GenericRecord](HadoopInputFile.fromPath(new Path(file), conf)).withConf(conf).build() // #init-reader if (writer != null && reader != null) { // forces val usage diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 0dbd617a9..4abb9666c 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -25,6 +25,7 @@ object Dependencies { val InfluxDBJavaVersion = "2.15" + val AvroVersion = "1.11.3" val AwsSdk2Version = "2.17.113" val AwsSpiPekkoHttpVersion = "0.1.0" // Sync with plugins.sbt @@ -157,12 +158,13 @@ object Dependencies { "com.google.jimfs" % "jimfs" % "1.2" % Test)) val avro4sVersion: Def.Initialize[String] = Def.setting { - if (Common.isScala3.value) "5.0.4" else "4.1.1" + if (Common.isScala3.value) "5.0.5" else "4.1.1" } val AvroParquet = Seq( libraryDependencies ++= Seq( - "org.apache.parquet" % "parquet-avro" % "1.10.1", // Apache2 + "org.apache.parquet" % "parquet-avro" % "1.13.1", // Apache2 + "org.apache.avro" % "avro" % AvroVersion, 
("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 "com.sksamuel.avro4s" %% "avro4s-core" % avro4sVersion.value % Test, @@ -214,7 +216,7 @@ object Dependencies { libraryDependencies ++= Seq( // https://github.com/googleapis/java-bigquerystorage/tree/master/proto-google-cloud-bigquerystorage-v1 "com.google.api.grpc" % "proto-google-cloud-bigquerystorage-v1" % "1.22.0" % "protobuf-src", - "org.apache.avro" % "avro" % "1.9.2" % "provided", + "org.apache.avro" % "avro" % AvroVersion % "provided", "org.apache.arrow" % "arrow-vector" % "4.0.0" % "provided", "io.grpc" % "grpc-auth" % org.apache.pekko.grpc.gen.BuildInfo.grpcVersion, "com.google.protobuf" % "protobuf-java" % protobufJavaVersion, From 867d80928ebbef36cd8a8cffab9265ea173db25d Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Mon, 9 Oct 2023 06:08:24 -0300 Subject: [PATCH 67/90] Remove unnecessary license comments --- project/Dependencies.scala | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 4abb9666c..4c094cf8c 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -122,13 +122,12 @@ object Dependencies { val Couchbase = Seq( libraryDependencies ++= Seq( - "com.couchbase.client" % "java-client" % CouchbaseVersion, // ApacheV2 - "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", // ApacheV2 - "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided, // Apache V2 - "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion % Test, // Apache V2 - "com.fasterxml.jackson.core" % "jackson-databind" % JacksonDatabindVersion % Test, // Apache V2 - "com.fasterxml.jackson.module" %% "jackson-module-scala" % JacksonDatabindVersion % Test // Apache V2 - )) + "com.couchbase.client" % "java-client" % CouchbaseVersion, + "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", + "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided, + "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion % Test, + "com.fasterxml.jackson.core" % "jackson-databind" % JacksonDatabindVersion % Test, + "com.fasterxml.jackson.module" %% "jackson-module-scala" % JacksonDatabindVersion % Test)) val `Doc-examples` = Seq( libraryDependencies ++= Seq( @@ -163,15 +162,14 @@ object Dependencies { val AvroParquet = Seq( libraryDependencies ++= Seq( - "org.apache.parquet" % "parquet-avro" % "1.13.1", // Apache2 + "org.apache.parquet" % "parquet-avro" % "1.13.1", "org.apache.avro" % "avro" % AvroVersion, - ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 - ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 + ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), + ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), "com.sksamuel.avro4s" %% "avro4s-core" % avro4sVersion.value % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, - "org.specs2" %% "specs2-core" % "4.20.0" % Test, // MIT like: https://github.com/etorreborre/specs2/blob/master/LICENSE.txt - "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html - )) + "org.specs2" %% "specs2-core" % "4.20.0" % Test, + "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test)) val Ftp = Seq( libraryDependencies 
++= Seq( @@ -197,10 +195,9 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5", // ApacheV2 - "com.google.auth" % "google-auth-library-credentials" % GoogleAuthVersion, // BSD 3-clause - "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 - ) ++ Mockito) + "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5", + "com.google.auth" % "google-auth-library-credentials" % GoogleAuthVersion, + "io.specto" % "hoverfly-java" % hoverflyVersion % Test) ++ Mockito) val GoogleBigQuery = Seq( libraryDependencies ++= Seq( @@ -275,8 +272,8 @@ object Dependencies { val Hdfs = Seq( libraryDependencies ++= Seq( ("org.apache.hadoop" % "hadoop-client" % HadoopVersion).exclude("log4j", "log4j").exclude("org.slf4j", - "slf4j-log4j12"), // ApacheV2 - "org.typelevel" %% "cats-core" % "2.9.0", // MIT, + "slf4j-log4j12"), + "org.typelevel" %% "cats-core" % "2.9.0", ("org.apache.hadoop" % "hadoop-hdfs" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", "slf4j-log4j12"), ("org.apache.hadoop" % "hadoop-common" % HadoopVersion % Test).exclude("log4j", "log4j").exclude("org.slf4j", @@ -289,8 +286,7 @@ object Dependencies { libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, - "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5" // ApacheV2 - ) ++ Mockito) + "com.github.jwt-scala" %% "jwt-json-common" % "7.1.5") ++ Mockito) val InfluxDB = Seq( libraryDependencies ++= Seq( From 75ece5ae1f3f513735efc9fa83076a60ff96b98e Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 16 Oct 2023 09:39:01 +0100 Subject: [PATCH 68/90] upgrade netty used in cassandra connector due to CVE (#262) --- project/Dependencies.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 4c094cf8c..44d86fa36 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -28,6 +28,7 @@ object Dependencies { val AvroVersion = "1.11.3" val AwsSdk2Version = "2.17.113" val AwsSpiPekkoHttpVersion = "0.1.0" + val NettyVersion = "4.1.100.Final" // Sync with plugins.sbt val PekkoGrpcBinaryVersion = "current" val PekkoHttpVersion = "1.0.0" @@ -118,6 +119,7 @@ object Dependencies { .exclude("com.github.spotbugs", "spotbugs-annotations") .exclude("org.apache.tinkerpop", "*") // https://github.com/akka/alpakka/issues/2200 .exclude("com.esri.geometry", "esri-geometry-api"), // https://github.com/akka/alpakka/issues/2225 + "io.netty" % "netty-handler" % NettyVersion, "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided)) val Couchbase = Seq( From 39253e8b3c690fc2c1e870867aaead34ec28cdbe Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 16 Oct 2023 16:50:33 +0100 Subject: [PATCH 69/90] upgrade jackson version (#263) --- project/Dependencies.scala | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 44d86fa36..202d09fca 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -114,7 +114,7 @@ object Dependencies { val CassandraDriverVersionInDocs = "4.15" val Cassandra = Seq( - libraryDependencies ++= Seq( + libraryDependencies ++= JacksonDatabindDependencies ++ Seq( ("com.datastax.oss" % "java-driver-core" % CassandraDriverVersion) .exclude("com.github.spotbugs", 
"spotbugs-annotations") .exclude("org.apache.tinkerpop", "*") // https://github.com/akka/alpakka/issues/2200 @@ -186,6 +186,8 @@ object Dependencies { Seq("geode-core", "geode-cq") .map("org.apache.geode" % _ % GeodeVersion) ++ Seq( + "com.fasterxml.jackson.datatype" % "jackson-datatype-joda" % JacksonDatabindVersion, + "com.fasterxml.jackson.datatype" % "jackson-datatype-jsr310" % JacksonDatabindVersion, "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies ++ (if (isScala3.value) Seq.empty // Equivalent and relevant shapeless functionality has been mainlined into Scala 3 language/stdlib @@ -350,10 +352,10 @@ object Dependencies { "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion % Test)) val OrientDB = Seq( - libraryDependencies ++= Seq( - ("com.orientechnologies" % "orientdb-graphdb" % "3.1.9") + libraryDependencies ++= JacksonDatabindDependencies ++ Seq( + ("com.orientechnologies" % "orientdb-graphdb" % "3.1.20") .exclude("com.tinkerpop.blueprints", "blueprints-core"), - "com.orientechnologies" % "orientdb-object" % "3.1.9")) + "com.orientechnologies" % "orientdb-object" % "3.1.20")) val PravegaVersion = "0.10.2" val PravegaVersionForDocs = s"v$PravegaVersion" From ef72e9c2756930796b351329e21ce1b679004a27 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Tue, 24 Oct 2023 12:11:40 +0100 Subject: [PATCH 70/90] more details for v1.0.1 release (#265) --- docs/src/main/paradox/release-notes/index.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md index 352dab15f..e03ca0265 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -9,6 +9,16 @@ To understand the forces on version numbers, read about @ref:[Apache Pekko Conne * Fix `apiURL` so that projects depending on pekko-connectors have the correct url in their scaladocs via sbt's [autoAPIMappings](https://www.scala-sbt.org/1.x/docs/Howto-Scaladoc.html#Define+the+location+of+API+documentation+for+a+library) feature ([PR252](https://github.com/apache/incubator-pekko-connectors/pull/252)) +### Dependency Upgrades + +Most dependency changes in this release relate to upgrading dependencies that are affected by CVEs. + +* Pekko gRPC 1.0.1 (includes some useful dependency updates) +* avro 1.11.3 ([#259](https://github.com/apache/incubator-pekko-connectors/issues/259)) +* jackson 2.14.3 - use in more places ([#263](https://github.com/apache/incubator-pekko-connectors/pull/263)) +* google-auth-library-oauth2-http 1.20.0 ([#256](https://github.com/apache/incubator-pekko-connectors/issues/256)) +* netty 4.1.100 - updated in cassandra connector ([#262](https://github.com/apache/incubator-pekko-connectors/pull/262)) + ## 1.0.0 Apache Pekko Connectors 1.0.0 is based on Alpakka 4.0.0. 
Pekko came about as a result of Lightbend's decision to make future From 19336cc0c76ae974f0ac737b11383bdf38dd2ac1 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Thu, 26 Oct 2023 13:41:44 +0100 Subject: [PATCH 71/90] sbt-source-dist 0.1.11 (#267) --- project/plugins.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index e30fc1c26..01caae2c9 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -11,7 +11,7 @@ addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.4.3") addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.1.1") addSbtPlugin("net.bzzt" % "sbt-reproducible-builds" % "0.31") addSbtPlugin("org.mdedetrich" % "sbt-apache-sonatype" % "0.1.10") -addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.10") +addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.11") addSbtPlugin("com.github.sbt" % "sbt-license-report" % "1.6.1") // discipline addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.9.0") From ae49fbcabdec86f55e93bb959ecfc8467fbe3a84 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Fri, 27 Oct 2023 20:21:41 +0100 Subject: [PATCH 72/90] remove release note (#268) --- docs/src/main/paradox/release-notes/index.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md index e03ca0265..758c5f12a 100644 --- a/docs/src/main/paradox/release-notes/index.md +++ b/docs/src/main/paradox/release-notes/index.md @@ -13,7 +13,6 @@ url in their scaladocs via sbt's [autoAPIMappings](https://www.scala-sbt.org/1.x Most dependency changes in this release relate to upgrading dependencies that are affected by CVEs. -* Pekko gRPC 1.0.1 (includes some useful dependency updates) * avro 1.11.3 ([#259](https://github.com/apache/incubator-pekko-connectors/issues/259)) * jackson 2.14.3 - use in more places ([#263](https://github.com/apache/incubator-pekko-connectors/pull/263)) * google-auth-library-oauth2-http 1.20.0 ([#256](https://github.com/apache/incubator-pekko-connectors/issues/256)) From b4884fa64dac6f66e3cf758f0e42948f7202dfb9 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 28 Oct 2023 13:20:01 +0100 Subject: [PATCH 73/90] pekko grpc 1.0.1 release (#266) * pekko grpc 1.0.1 release * Update plugins.sbt --- project/plugins.sbt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/project/plugins.sbt b/project/plugins.sbt index 01caae2c9..14c543ee8 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -7,6 +7,8 @@ * This file is part of the Apache Pekko project, which was derived from Akka. 
*/ +dependencyOverrides += "org.scala-lang.modules" %% "scala-xml" % "2.2.0" + addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.4.3") addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.1.1") addSbtPlugin("net.bzzt" % "sbt-reproducible-builds" % "0.31") @@ -34,7 +36,7 @@ addSbtPlugin(("com.lightbend.paradox" % "sbt-paradox-project-info" % "2.0.0").fo addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1") -// Pekko gRPC -- sync with version in Dependencies.scala:29 -addSbtPlugin("org.apache.pekko" % "pekko-grpc-sbt-plugin" % "1.0.0") +// Pekko gRPC -- sync with PekkoGrpcBinaryVersion in Dependencies.scala +addSbtPlugin("org.apache.pekko" % "pekko-grpc-sbt-plugin" % "1.0.1") // templating addSbtPlugin("io.spray" % "sbt-boilerplate" % "0.6.1") From ac4655064400e9800871f70c7527b65dc7655ad1 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sat, 28 Oct 2023 17:54:23 +0100 Subject: [PATCH 74/90] publish 1.0.1 docs (#270) --- .github/workflows/publish-1.0-docs.yml | 10 +++++----- project/build.properties | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/publish-1.0-docs.yml b/.github/workflows/publish-1.0-docs.yml index c4b449302..2e626a11e 100644 --- a/.github/workflows/publish-1.0-docs.yml +++ b/.github/workflows/publish-1.0-docs.yml @@ -26,17 +26,17 @@ jobs: - name: Build Documentation run: |- - sbt "set ThisBuild / version := \"1.0.0\"; docs/paradox; unidoc" + sbt "set ThisBuild / version := \"1.0.1\"; docs/paradox; unidoc" # Create directory structure upfront since rsync does not create intermediate directories otherwise - name: Create directory structure run: |- - mkdir -p target/nightly-docs/docs/pekko-connectors/1.0.0/ + mkdir -p target/nightly-docs/docs/pekko-connectors/1.0.1/ mkdir -p target/nightly-docs/docs/pekko-connectors/1.0/ - cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-connectors/1.0.0/docs + cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-connectors/1.0.1/docs cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-connectors/1.0/docs rm -r docs/target/paradox/site/main/ - cp -r target/scala-2.13/unidoc target/nightly-docs/docs/pekko-connectors/1.0.0/api + cp -r target/scala-2.13/unidoc target/nightly-docs/docs/pekko-connectors/1.0.1/api cp -r target/scala-2.13/unidoc target/nightly-docs/docs/pekko-connectors/1.0/api rm -r target/scala-2.13/unidoc @@ -45,7 +45,7 @@ jobs: with: upload: true switches: --archive --compress --update --delete --progress --relative - local_path: target/nightly-docs/./docs/pekko-connectors/1.0.0 # The intermediate dot is to show `--relative` which paths to operate on + local_path: target/nightly-docs/./docs/pekko-connectors/1.0.1 # The intermediate dot is to show `--relative` which paths to operate on remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/ remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }} remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }} diff --git a/project/build.properties b/project/build.properties index 27430827b..e8a1e246e 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.9.6 +sbt.version=1.9.7 From 49189361744262f99ff60db8eb85be355a22502a Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sun, 29 Oct 2023 10:04:01 +0100 Subject: [PATCH 75/90] Properly fix versioning for Pekko dependencies --- project/Dependencies.scala | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 202d09fca..a1e8bd302 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -21,7 +21,7 @@ object Dependencies { val ScalaVersions = Seq(Scala213, Scala212, Scala3) val PekkoVersion = "1.0.1" - val PekkoBinaryVersion = "current" + val PekkoBinaryVersion = "1.0" val InfluxDBJavaVersion = "2.15" @@ -30,9 +30,9 @@ object Dependencies { val AwsSpiPekkoHttpVersion = "0.1.0" val NettyVersion = "4.1.100.Final" // Sync with plugins.sbt - val PekkoGrpcBinaryVersion = "current" + val PekkoGrpcBinaryVersion = "1.0" val PekkoHttpVersion = "1.0.0" - val PekkoHttpBinaryVersion = "current" + val PekkoHttpBinaryVersion = "1.0" val ScalaTestVersion = "3.2.14" val TestContainersScalaTestVersion = "0.40.14" val mockitoVersion = "4.2.0" // check even https://github.com/scalatest/scalatestplus-mockito/releases From 749e69a72543ed30f5bb3b6a5d9e0bb8471f463d Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 30 Oct 2023 09:24:48 +0000 Subject: [PATCH 76/90] remove ActorMaterializer (#272) --- .../src/test/java/docs/javadsl/ExampleReader.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/google-cloud-bigquery-storage/src/test/java/docs/javadsl/ExampleReader.java b/google-cloud-bigquery-storage/src/test/java/docs/javadsl/ExampleReader.java index 70143e894..3dfbd4c0d 100644 --- a/google-cloud-bigquery-storage/src/test/java/docs/javadsl/ExampleReader.java +++ b/google-cloud-bigquery-storage/src/test/java/docs/javadsl/ExampleReader.java @@ -18,7 +18,6 @@ import org.apache.pekko.NotUsed; import org.apache.pekko.actor.ActorSystem; -import org.apache.pekko.stream.ActorMaterializer; // #read-all import org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.BigQueryRecord; import org.apache.pekko.stream.connectors.googlecloud.bigquery.storage.BigQueryStorageSettings; @@ -40,7 +39,6 @@ public class ExampleReader { static final ActorSystem sys = ActorSystem.create("ExampleReader"); - static final ActorMaterializer mat = ActorMaterializer.create(sys); // #read-all Source< From a6dd0ee164d8d86b12c547c771ffa17983c867fe Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 30 Oct 2023 12:31:12 +0000 Subject: [PATCH 77/90] remove ActorMaterializer (#273) --- .../apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala | 3 --- 1 file changed, 3 deletions(-) diff --git a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala index 25bcc4fb9..b26b0b9c8 100644 --- a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala +++ b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala @@ -8,7 +8,6 @@ import java.util.concurrent.TimeUnit import org.apache.pekko import pekko.NotUsed import pekko.actor.ActorSystem -import pekko.stream.ActorMaterializer import pekko.stream.scaladsl.Source import pekko.util.ByteString import org.openjdk.jmh.annotations._ @@ -51,7 +50,6 @@ class CsvBench { implicit val system: ActorSystem = ActorSystem() implicit val executionContext: ExecutionContext = system.dispatcher - implicit val mat: ActorMaterializer = ActorMaterializer() /** * Size of [[ByteString]] chunks in bytes. 
@@ -85,7 +83,6 @@ class CsvBench { @TearDown def tearDown(): Unit = { - mat.shutdown() system.terminate() } From 0a7143712f69b1f0c5e38595b72fe3f4ed873dd8 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 30 Oct 2023 17:51:13 +0000 Subject: [PATCH 78/90] add license headers to some files (#274) --- .../pekko/stream/connectors/csv/scaladsl/CsvBench.scala | 9 +++++++++ .../schema/ProductSchemasInstances.scala.template | 9 +++++++++ .../spray/BigQueryProductFormatsInstances.scala.template | 9 +++++++++ scripts/link-validator.conf | 2 ++ 4 files changed, 29 insertions(+) diff --git a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala index b26b0b9c8..d81dc4a9e 100644 --- a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala +++ b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala @@ -1,3 +1,12 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, which was derived from Akka. + */ + /* * Copyright (C) 2016-2019 Lightbend Inc. */ diff --git a/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/ProductSchemasInstances.scala.template b/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/ProductSchemasInstances.scala.template index 10b82289f..1af68f8be 100644 --- a/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/ProductSchemasInstances.scala.template +++ b/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/schema/ProductSchemasInstances.scala.template @@ -1,3 +1,12 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, which was derived from Akka. + */ + /* * Copyright (C) 2016-2020 Lightbend Inc. */ diff --git a/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryProductFormatsInstances.scala.template b/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryProductFormatsInstances.scala.template index 2cb84de40..04bd4260a 100644 --- a/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryProductFormatsInstances.scala.template +++ b/google-cloud-bigquery/src/main/boilerplate/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/spray/BigQueryProductFormatsInstances.scala.template @@ -1,3 +1,12 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * license agreements; and to You under the Apache License, version 2.0: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * This file is part of the Apache Pekko project, which was derived from Akka. + */ + /* * Copyright (C) 2016-2020 Lightbend Inc. 
*/ diff --git a/scripts/link-validator.conf b/scripts/link-validator.conf index 37dac1b74..1b28105e8 100644 --- a/scripts/link-validator.conf +++ b/scripts/link-validator.conf @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 + // config for https://github.com/ennru/site-link-validator/ site-link-validator { root-dir = "./docs/target/site/" From dcc040bb3318cdf228220ef1a9005c3f819d056b Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Wed, 1 Nov 2023 08:15:03 +0100 Subject: [PATCH 79/90] Remove useless condition --- .../stream/connectors/mqtt/streaming/impl/RequestState.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala index da54ba8e3..23b9ab79f 100644 --- a/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala +++ b/mqtt-streaming/src/main/scala/org/apache/pekko/stream/connectors/mqtt/streaming/impl/RequestState.scala @@ -646,7 +646,7 @@ object Topics { val tnHead = tn.charAt(0) if (tfnHead == '/' && tnHead != '/') { false - } else if (tfnHead == '/' && tnHead == '/' && tn.length == 1) { + } else if (tfnHead == '/' && tn.length == 1) { matchStrings(tfn, tn.tail) } else if (tfnHead != '+' && tfnHead != '#' && tfnHead != tnHead) { false From 46796e26ac325b5321f2c30770ac79fdef42d53c Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 12 Nov 2023 17:47:25 +0100 Subject: [PATCH 80/90] enable mima checks (#278) --- .github/workflows/check-build-test.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check-build-test.yml b/.github/workflows/check-build-test.yml index 08439667a..9fc7b56f2 100644 --- a/.github/workflows/check-build-test.yml +++ b/.github/workflows/check-build-test.yml @@ -4,7 +4,6 @@ on: pull_request: push: branches: - - master - main tags-ignore: - v* @@ -37,9 +36,8 @@ jobs: - name: Cache Coursier cache uses: coursier/cache-action@v6.4.0 - # temporarily remove mima checks - - name: "Code style, compile tests, MiMa. Run locally with: sbt +~2.13 \"javafmtCheckAll; Test/compile; mimaReportBinaryIssues\"" - run: sbt "javafmtCheckAll; +Test/compile" + - name: "Code style, compile tests, MiMa. 
Run locally with: sbt \"javafmtCheckAll; +Test/compile; +mimaReportBinaryIssues\"" + run: sbt "javafmtCheckAll; +Test/compile; +mimaReportBinaryIssues" documentation: name: ScalaDoc, Documentation with Paradox From aebbf49658edd65fa47d96118fe07e1354c2b7f3 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sun, 12 Nov 2023 15:36:34 +0100 Subject: [PATCH 81/90] Accept any materializer type param for S3's chunkUploadSink --- .../apache/pekko/stream/connectors/s3/impl/S3Stream.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala index 5fba9b443..7b88ad6c8 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/impl/S3Stream.scala @@ -906,7 +906,7 @@ import scala.util.{ Failure, Success, Try } */ def multipartUploadWithContext[C]( s3Location: S3Location, - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], contentType: ContentType = ContentTypes.`application/octet-stream`, s3Headers: S3Headers, chunkSize: Int = MinChunkSize, @@ -943,7 +943,7 @@ import scala.util.{ Failure, Success, Try } s3Location: S3Location, uploadId: String, previousParts: immutable.Iterable[Part], - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], contentType: ContentType = ContentTypes.`application/octet-stream`, s3Headers: S3Headers, chunkSize: Int = MinChunkSize, @@ -1216,7 +1216,7 @@ import scala.util.{ Failure, Success, Try } contentType: ContentType, s3Headers: S3Headers, chunkSize: Int, - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], initialUploadState: Option[(String, Int)] = None)( parallelism: Int): Flow[(ByteString, C), UploadPartResponse, NotUsed] = { From 32facdd804b61e3c0839aef06b1091f0290538ca Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sun, 12 Nov 2023 20:34:36 +0100 Subject: [PATCH 82/90] Change materializer type for chunkUploadSink in S3 DSL's --- ...nkUploadSink-type-param-change.backwards.excludes | 12 ++++++++++++ .../pekko/stream/connectors/s3/javadsl/S3.scala | 12 ++++++------ .../pekko/stream/connectors/s3/scaladsl/S3.scala | 8 ++++---- 3 files changed, 22 insertions(+), 10 deletions(-) create mode 100644 s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes diff --git a/s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes b/s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes new file mode 100644 index 000000000..1d9a11221 --- /dev/null +++ b/s3/src/main/mima-filters/1.0.x.backwards.excludes/chunkUploadSink-type-param-change.backwards.excludes @@ -0,0 +1,12 @@ +# These filters are here because a type argument in the chunkUploadSink parameter was changed from +# NotUsed to _ which has zero effect on runtime +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.javadsl.S3.resumeMultipartUploadWithContext") 
+ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.javadsl.S3.multipartUploadWithContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.javadsl.S3.resumeMultipartUploadWithContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithHeadersAndContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.multipartUploadWithHeadersAndContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.multipartUploadWithContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.multipartUploadWithHeadersAndContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithContext") +ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.stream.connectors.s3.scaladsl.S3.resumeMultipartUploadWithHeadersAndContext") diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala index 41078d8f0..5873d978d 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/javadsl/S3.scala @@ -961,7 +961,7 @@ object S3 { def multipartUploadWithContext[C]( bucket: String, key: String, - chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed], + chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _], contentType: ContentType, s3Headers: S3Headers): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = S3Stream @@ -1003,7 +1003,7 @@ object S3 { def multipartUploadWithContext[C]( bucket: String, key: String, - chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed], + chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _], contentType: ContentType): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = multipartUploadWithContext[C](bucket, key, @@ -1034,7 +1034,7 @@ object S3 { def multipartUploadWithContext[C]( bucket: String, key: String, - chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed]) + chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _]) : Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = multipartUploadWithContext[C](bucket, key, chunkUploadSink, ContentTypes.APPLICATION_OCTET_STREAM) @@ -1133,7 +1133,7 @@ object S3 { key: String, uploadId: String, previousParts: java.lang.Iterable[Part], - chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed], + chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _], contentType: ContentType, s3Headers: S3Headers): Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = { S3Stream @@ -1183,7 +1183,7 @@ object S3 { key: String, uploadId: String, previousParts: java.lang.Iterable[Part], - chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed], + chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _], contentType: ContentType): 
Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = resumeMultipartUploadWithContext[C](bucket, key, @@ -1221,7 +1221,7 @@ object S3 { key: String, uploadId: String, previousParts: java.lang.Iterable[Part], - chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], NotUsed]) + chunkUploadSink: Sink[JPair[UploadPartResponse, java.lang.Iterable[C]], _]) : Sink[JPair[ByteString, C], CompletionStage[MultipartUploadResult]] = resumeMultipartUploadWithContext[C](bucket, key, diff --git a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala index 56cd9bc86..9a09fef56 100644 --- a/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala +++ b/s3/src/main/scala/org/apache/pekko/stream/connectors/s3/scaladsl/S3.scala @@ -581,7 +581,7 @@ object S3 { def multipartUploadWithContext[C]( bucket: String, key: String, - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], contentType: ContentType = ContentTypes.`application/octet-stream`, metaHeaders: MetaHeaders = MetaHeaders(Map()), cannedAcl: CannedAcl = CannedAcl.Private, @@ -626,7 +626,7 @@ object S3 { def multipartUploadWithHeadersAndContext[C]( bucket: String, key: String, - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], contentType: ContentType = ContentTypes.`application/octet-stream`, chunkSize: Int = MinChunkSize, chunkingParallelism: Int = 4, @@ -710,7 +710,7 @@ object S3 { key: String, uploadId: String, previousParts: immutable.Iterable[Part], - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], contentType: ContentType = ContentTypes.`application/octet-stream`, metaHeaders: MetaHeaders = MetaHeaders(Map()), cannedAcl: CannedAcl = CannedAcl.Private, @@ -797,7 +797,7 @@ object S3 { key: String, uploadId: String, previousParts: immutable.Iterable[Part], - chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), NotUsed], + chunkUploadSink: Sink[(UploadPartResponse, immutable.Iterable[C]), _], contentType: ContentType = ContentTypes.`application/octet-stream`, chunkSize: Int = MinChunkSize, chunkingParallelism: Int = 4, From 3e0a1c2e456d02bb477cd705bbdc9358eec2f9c7 Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Sat, 2 Dec 2023 12:51:29 +1100 Subject: [PATCH 83/90] Update Pekko --- project/Dependencies.scala | 2 +- xml/src/test/java/docs/javadsl/XmlParsingTest.java | 1 + xml/src/test/scala/docs/scaladsl/XmlProcessingSpec.scala | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index a1e8bd302..439623280 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -20,7 +20,7 @@ object Dependencies { val Scala3 = "3.3.1" val ScalaVersions = Seq(Scala213, Scala212, Scala3) - val PekkoVersion = "1.0.1" + val PekkoVersion = "1.0.2" val PekkoBinaryVersion = "1.0" val InfluxDBJavaVersion = "2.15" diff --git a/xml/src/test/java/docs/javadsl/XmlParsingTest.java b/xml/src/test/java/docs/javadsl/XmlParsingTest.java index 9c63a5be6..bf515af04 100644 --- a/xml/src/test/java/docs/javadsl/XmlParsingTest.java +++ b/xml/src/test/java/docs/javadsl/XmlParsingTest.java @@ -47,6 +47,7 @@ import static 
org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; +@SuppressWarnings("deprecation") public class XmlParsingTest { @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); diff --git a/xml/src/test/scala/docs/scaladsl/XmlProcessingSpec.scala b/xml/src/test/scala/docs/scaladsl/XmlProcessingSpec.scala index 08d21ed02..84aba78eb 100644 --- a/xml/src/test/scala/docs/scaladsl/XmlProcessingSpec.scala +++ b/xml/src/test/scala/docs/scaladsl/XmlProcessingSpec.scala @@ -29,6 +29,9 @@ import scala.concurrent.{ Await, Future } import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec +import scala.annotation.nowarn + +@nowarn("msg=deprecated") class XmlProcessingSpec extends AnyWordSpec with Matchers with ScalaFutures with BeforeAndAfterAll with LogCapturing { implicit val system: ActorSystem = ActorSystem("Test") implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 2.seconds, interval = 50.millis) From 320604adad903325b309236ccc55cc9f3fc14883 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 4 Dec 2023 20:11:22 +0100 Subject: [PATCH 84/90] logback 1.2.13 (#287) --- project/Dependencies.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 439623280..3bf15d496 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -72,7 +72,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-stream" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion, "org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, - "ch.qos.logback" % "logback-classic" % "1.2.11", + "ch.qos.logback" % "logback-classic" % "1.2.13", "org.scalatest" %% "scalatest" % ScalaTestVersion, "com.dimafeng" %% "testcontainers-scala-scalatest" % TestContainersScalaTestVersion, "com.novocode" % "junit-interface" % "0.11", From db689989006380aac807f752e357002cdbd50d78 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 11 Dec 2023 09:42:39 +0100 Subject: [PATCH 85/90] increase cassandra test timeout (#290) --- cassandra/src/test/java/docs/javadsl/CassandraTestHelper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/src/test/java/docs/javadsl/CassandraTestHelper.java b/cassandra/src/test/java/docs/javadsl/CassandraTestHelper.java index 10009aed1..ca4bb1e8e 100644 --- a/cassandra/src/test/java/docs/javadsl/CassandraTestHelper.java +++ b/cassandra/src/test/java/docs/javadsl/CassandraTestHelper.java @@ -65,7 +65,7 @@ public static T await(CompletionStage cs) } public static T await(Future future) { - int seconds = 20; + int seconds = 40; try { return Await.result(future, FiniteDuration.create(seconds, TimeUnit.SECONDS)); } catch (InterruptedException e) { From 44b465eb8730381249857bb10cc17ae510252e8f Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 24 Dec 2023 00:12:40 +0100 Subject: [PATCH 86/90] use sbt-pekko-build (#293) temporarily test with pekko main snapshot Update build.sbt logback issue sbt-pekko-build 1.0.2 add nightly job Update nightly-pekko-1.1-builds.yaml Update nightly-pekko-1.1-builds.yaml multiple jdks temp run for pull request Update KinesisTest.java Update nightly-pekko-1.1-builds.yaml Update nightly-pekko-1.1-builds.yaml deprecation issue Update DirectoryChangesSource.java Update nightly-pekko-1.1-builds.yaml Revert "Update DirectoryChangesSource.java" This reverts commit 1b1932a4e0c78142f76bd45ce6471d34a971c3fb. 
Revert "deprecation issue" This reverts commit 2b41b26ad1a740bacaaed35994cf2fc7a2e5ff44. Revert "Update KinesisTest.java" This reverts commit a7c472fbe44ee90c8a3e0d4cd4ee0576f0ef98a4. Revert "temp run for pull request" This reverts commit c92b62db801aa4aa912e0a683272607ec17f6250. --- .github/workflows/check-build-test.yml | 6 +-- .github/workflows/headers.yml | 2 +- .github/workflows/link-validator.yml | 2 +- .github/workflows/nightly-builds.yaml | 2 +- .../workflows/nightly-pekko-1.1-builds.yaml | 43 +++++++++++++++++++ .github/workflows/publish-1.0-docs.yml | 2 +- .github/workflows/publish-nightly.yml | 2 +- build.sbt | 3 ++ project/Dependencies.scala | 9 ++-- project/plugins.sbt | 1 + 10 files changed, 61 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/nightly-pekko-1.1-builds.yaml diff --git a/.github/workflows/check-build-test.yml b/.github/workflows/check-build-test.yml index 9fc7b56f2..3ff858d09 100644 --- a/.github/workflows/check-build-test.yml +++ b/.github/workflows/check-build-test.yml @@ -28,7 +28,7 @@ jobs: fetch-depth: 0 - name: Setup Java 8 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 8 @@ -53,7 +53,7 @@ jobs: fetch-depth: 0 - name: Setup Java 11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 @@ -135,7 +135,7 @@ jobs: fetch-depth: 0 - name: Setup Java 8 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 8 diff --git a/.github/workflows/headers.yml b/.github/workflows/headers.yml index e040d34cb..7f723e562 100644 --- a/.github/workflows/headers.yml +++ b/.github/workflows/headers.yml @@ -17,7 +17,7 @@ jobs: fetch-depth: 0 - name: Setup Java 8 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 8 diff --git a/.github/workflows/link-validator.yml b/.github/workflows/link-validator.yml index 4bf4e7230..76b262f46 100644 --- a/.github/workflows/link-validator.yml +++ b/.github/workflows/link-validator.yml @@ -16,7 +16,7 @@ jobs: fetch-depth: 0 - name: Setup Java 11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 11 diff --git a/.github/workflows/nightly-builds.yaml b/.github/workflows/nightly-builds.yaml index a9b6202e7..85d88c57c 100644 --- a/.github/workflows/nightly-builds.yaml +++ b/.github/workflows/nightly-builds.yaml @@ -19,7 +19,7 @@ jobs: fetch-depth: 0 - name: Setup Java 8 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 8 diff --git a/.github/workflows/nightly-pekko-1.1-builds.yaml b/.github/workflows/nightly-pekko-1.1-builds.yaml new file mode 100644 index 000000000..63ee71a74 --- /dev/null +++ b/.github/workflows/nightly-pekko-1.1-builds.yaml @@ -0,0 +1,43 @@ +name: Nightly Pekko 1.1 Builds + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + +permissions: {} + +concurrency: + # Only run once for latest commit per ref and cancel other (previous) runs. 
+ group: pekko-1.1-${{ github.ref }} + cancel-in-progress: true + +jobs: + test-compile: + name: Compile + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + JDK: [ 8, 11 ] + env: + JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-tags: true + fetch-depth: 0 + + - name: Setup Java ${{ matrix.JDK }} + uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: ${{ matrix.JDK }} + + - name: Cache Coursier cache + uses: coursier/cache-action@v6.4.0 + + - name: "compile, including tests. Run locally with: sbt -Dpekko.build.pekko.version=main +Test/compile" + run: sbt -Dpekko.build.pekko.version=main +Test/compile diff --git a/.github/workflows/publish-1.0-docs.yml b/.github/workflows/publish-1.0-docs.yml index 2e626a11e..758daff92 100644 --- a/.github/workflows/publish-1.0-docs.yml +++ b/.github/workflows/publish-1.0-docs.yml @@ -19,7 +19,7 @@ jobs: fetch-depth: 0 - name: Setup Java 8 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 8 diff --git a/.github/workflows/publish-nightly.yml b/.github/workflows/publish-nightly.yml index a7f0a2dc3..864870fec 100644 --- a/.github/workflows/publish-nightly.yml +++ b/.github/workflows/publish-nightly.yml @@ -21,7 +21,7 @@ jobs: fetch-depth: 0 - name: Setup Java 8 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: temurin java-version: 8 diff --git a/build.sbt b/build.sbt index 454f1533d..65a2ad301 100644 --- a/build.sbt +++ b/build.sbt @@ -12,6 +12,7 @@ import net.bzzt.reproduciblebuilds.ReproducibleBuildsPlugin.reproducibleBuildsCh ThisBuild / apacheSonatypeProjectProfile := "pekko" sourceDistName := "apache-pekko-connectors" sourceDistIncubating := true +ThisBuild / resolvers += Resolver.ApacheMavenSnapshotsRepo commands := commands.value.filterNot { command => command.nameOption.exists { name => @@ -470,6 +471,8 @@ def internalProject(projectId: String, additionalSettings: sbt.Def.SettingsDefin Global / onLoad := (Global / onLoad).value.andThen { s => val v = version.value val log = sLog.value + log.info( + s"Building Pekko Connectors $v against Pekko ${Dependencies.PekkoVersion} on Scala ${(googleCommon / scalaVersion).value}") if (dynverGitDescribeOutput.value.hasNoTags) log.error( s"Failed to derive version from git tags. Maybe run `git fetch --unshallow` or `git fetch upstream` on a fresh git clone from a fork? 
Derived version: $v") diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 3bf15d496..e4a1b07c5 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -10,6 +10,7 @@ import sbt._ import Common.isScala3 import Keys._ +import com.github.pjfanning.pekkobuild.PekkoDependency object Dependencies { @@ -20,8 +21,8 @@ object Dependencies { val Scala3 = "3.3.1" val ScalaVersions = Seq(Scala213, Scala212, Scala3) - val PekkoVersion = "1.0.2" - val PekkoBinaryVersion = "1.0" + val PekkoVersion = PekkoDependency.pekkoVersionDerivedFromDefault("1.0.2") + val PekkoBinaryVersion = PekkoVersion.take(3) val InfluxDBJavaVersion = "2.15" @@ -40,6 +41,8 @@ object Dependencies { val hoverflyVersion = "0.14.1" val scalaCheckVersion = "1.16.0" + val LogbackVersion = if (PekkoBinaryVersion == "1.0") "1.2.13" else "1.3.14" + /** * Calculates the scalatest version in a format that is used for `org.scalatestplus` scalacheck artifacts * @@ -72,7 +75,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-stream" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion, "org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, - "ch.qos.logback" % "logback-classic" % "1.2.13", + "ch.qos.logback" % "logback-classic" % LogbackVersion, "org.scalatest" %% "scalatest" % ScalaTestVersion, "com.dimafeng" %% "testcontainers-scala-scalatest" % TestContainersScalaTestVersion, "com.novocode" % "junit-interface" % "0.11", diff --git a/project/plugins.sbt b/project/plugins.sbt index 14c543ee8..1679336b9 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -14,6 +14,7 @@ addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.1.1") addSbtPlugin("net.bzzt" % "sbt-reproducible-builds" % "0.31") addSbtPlugin("org.mdedetrich" % "sbt-apache-sonatype" % "0.1.10") addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.11") +addSbtPlugin("com.github.pjfanning" % "sbt-pekko-build" % "0.1.1") addSbtPlugin("com.github.sbt" % "sbt-license-report" % "1.6.1") // discipline addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.9.0") From 0bc0b9ea1fcdaada465a39f8b23d2b5b904047bd Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Sun, 24 Dec 2023 00:32:46 +0100 Subject: [PATCH 87/90] prepend package name for Record class (#295) * prepend package name for Record class * Update nightly-pekko-1.1-builds.yaml --- .github/workflows/nightly-pekko-1.1-builds.yaml | 2 +- .../stream/connectors/kinesis/javadsl/KinesisTest.java | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/nightly-pekko-1.1-builds.yaml b/.github/workflows/nightly-pekko-1.1-builds.yaml index 63ee71a74..d872db52c 100644 --- a/.github/workflows/nightly-pekko-1.1-builds.yaml +++ b/.github/workflows/nightly-pekko-1.1-builds.yaml @@ -19,7 +19,7 @@ jobs: strategy: fail-fast: false matrix: - JDK: [ 8, 11 ] + JDK: [ 8, 11, 17 ] env: JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 diff --git a/kinesis/src/test/java/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisTest.java b/kinesis/src/test/java/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisTest.java index be505de06..6973921be 100644 --- a/kinesis/src/test/java/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisTest.java +++ b/kinesis/src/test/java/org/apache/pekko/stream/connectors/kinesis/javadsl/KinesisTest.java @@ -85,12 +85,15 @@ public void PullRecord() throws Exception { invocation -> CompletableFuture.completedFuture( GetRecordsResponse.builder() - 
.records(Record.builder().sequenceNumber("1").build()) + .records(software.amazon.awssdk.services.kinesis.model.Record + .builder().sequenceNumber("1").build()) .nextShardIterator("iter") .build())); - final Source source = KinesisSource.basic(settings, amazonKinesisAsync); - final CompletionStage record = source.runWith(Sink.head(), system); + final Source source = + KinesisSource.basic(settings, amazonKinesisAsync); + final CompletionStage record = + source.runWith(Sink.head(), system); assertEquals("1", record.toCompletableFuture().get(10, TimeUnit.SECONDS).sequenceNumber()); } From c53ba216b79ded5c8216cdb2506c9d05d7e379cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Ferreira?= Date: Thu, 28 Dec 2023 10:43:19 +0000 Subject: [PATCH 88/90] Kinesis: fix maxBytesPerSecond throttling (#298) --- .../stream/connectors/kinesis/scaladsl/KinesisFlow.scala | 4 ++-- .../stream/connectors/kinesis/KinesisFlowSpec.scala | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala index bb64d7a50..3c02937e5 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/scaladsl/KinesisFlow.scala @@ -104,8 +104,8 @@ object KinesisFlow { entries: Iterable[(PutRecordsRequestEntry, T)])(result: PutRecordsResponse): List[(PutRecordsResultEntry, T)] = result.records.asScala.toList.zip(entries).map { case (res, (_, t)) => (res, t) } - private def getPayloadByteSize[T](record: (PutRecordsRequestEntry, T)): Int = record match { - case (request, _) => request.partitionKey.length + request.data.asByteBuffer.position() + private[kinesis] def getPayloadByteSize[T](record: (PutRecordsRequestEntry, T)): Int = record match { + case (request, _) => request.partitionKey.length + request.data.asByteArrayUnsafe.length } def byPartitionAndData( diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala index e430d9228..d124840a4 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisFlowSpec.scala @@ -66,6 +66,15 @@ class KinesisFlowSpec extends AnyWordSpec with Matchers with KinesisMock with Lo sinkProbe.expectError(FailurePublishingRecords(requestError)) } } + + "compute payload size" in { + val r = PutRecordsRequestEntry + .builder() + .partitionKey("") + .data(SdkBytes.fromByteBuffer(ByteString("data").asByteBuffer)) + .build() + KinesisFlow.getPayloadByteSize((r, "")) shouldBe 4 + } } "KinesisFlowWithUserContext" must { From b04de532761b770c112a4c0267a88da9a38a276b Mon Sep 17 00:00:00 2001 From: Matthew de Detrich Date: Fri, 29 Dec 2023 21:49:26 +1100 Subject: [PATCH 89/90] Remove redundant formatting sbt settings --- project/Common.scala | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/project/Common.scala b/project/Common.scala index b71dcfcc1..b9d24fb9e 100644 --- a/project/Common.scala +++ b/project/Common.scala @@ -10,10 +10,8 @@ import sbt._ import sbt.Keys._ import sbt.plugins.JvmPlugin -import org.scalafmt.sbt.ScalafmtPlugin.autoImport._ import de.heikoseeberger.sbtheader._ import 
com.lightbend.paradox.projectinfo.ParadoxProjectInfoPluginKeys._ -import com.lightbend.sbt.JavaFormatterPlugin.autoImport.javafmtOnCompile import com.typesafe.tools.mima.plugin.MimaKeys._ import org.mdedetrich.apache.sonatype.ApacheSonatypePlugin import sbtdynver.DynVerPlugin @@ -124,9 +122,7 @@ object Common extends AutoPlugin { // By default scalatest futures time out in 150 ms, dilate that to 600ms. // This should not impact the total test time as we don't expect to hit this // timeout. - Test / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-F", "4"), - scalafmtOnCompile := false, - javafmtOnCompile := false) + Test / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-F", "4")) override lazy val buildSettings = Seq( dynverSonatypeSnapshots := true) From 0ef66493cbebf792aa675e30cabc1e7d81e5e964 Mon Sep 17 00:00:00 2001 From: PJ Fanning Date: Mon, 1 Jan 2024 12:02:56 +0100 Subject: [PATCH 90/90] add 2024 to copyright (#303) --- NOTICE | 2 +- legal/PekkoConnectorsNotice.txt | 2 +- legal/S3Notice.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/NOTICE b/NOTICE index 598d8a966..34819c0c0 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Apache Pekko-Connectors -Copyright 2022, 2023 The Apache Software Foundation +Copyright 2022-2024 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (https://www.apache.org/). diff --git a/legal/PekkoConnectorsNotice.txt b/legal/PekkoConnectorsNotice.txt index be9af4dbd..d914ebfe7 100644 --- a/legal/PekkoConnectorsNotice.txt +++ b/legal/PekkoConnectorsNotice.txt @@ -1,5 +1,5 @@ Apache Pekko-Connectors -Copyright 2022, 2023 The Apache Software Foundation +Copyright 2022-2024 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (https://www.apache.org/). diff --git a/legal/S3Notice.txt b/legal/S3Notice.txt index 598d8a966..34819c0c0 100644 --- a/legal/S3Notice.txt +++ b/legal/S3Notice.txt @@ -1,5 +1,5 @@ Apache Pekko-Connectors -Copyright 2022, 2023 The Apache Software Foundation +Copyright 2022-2024 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (https://www.apache.org/).
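
A note on the Kinesis change in PATCH 88/90 above: the maxBytesPerSecond throttling fix replaces `request.data.asByteBuffer.position()` with `request.data.asByteArrayUnsafe.length` when sizing each `PutRecordsRequestEntry`. The sketch below is not part of the patch series; it assumes the AWS SDK v2 `SdkBytes` behaviour of returning a freshly wrapped, zero-position buffer view from `asByteBuffer`, and it illustrates why the old expression under-counted:

    import org.apache.pekko.util.ByteString
    import software.amazon.awssdk.core.SdkBytes

    object PayloadSizeSketch extends App {
      // Same input as the new KinesisFlowSpec test above: a 4-byte payload.
      val data = SdkBytes.fromByteBuffer(ByteString("data").asByteBuffer)

      // A fresh ByteBuffer view starts at position 0, so the old calculation
      // effectively counted only the partition key, not the record payload.
      println(data.asByteBuffer.position()) // 0

      // The fixed calculation uses the real byte count of the payload.
      println(data.asByteArrayUnsafe.length) // 4
    }

With the old expression each record contributed little more than its partition-key length to the byte budget, so the per-second byte throttle had almost no effect on large payloads; the new expression restores the intended behaviour, as checked by the "compute payload size" test added in the same patch.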