diff --git a/database/src/main/postgres/runs/V1.9.5__get_partitioning_measures_by_id.sql b/database/src/main/postgres/runs/V1.9.5__get_partitioning_measures_by_id.sql new file mode 100644 index 000000000..ec5f9b53e --- /dev/null +++ b/database/src/main/postgres/runs/V1.9.5__get_partitioning_measures_by_id.sql @@ -0,0 +1,68 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Function: runs.get_partitioning_measures_by_id(Long) +CREATE OR REPLACE FUNCTION runs.get_partitioning_measures_by_id( + IN i_partitioning_id BIGINT, + OUT status INTEGER, + OUT status_text TEXT, + OUT measure_name TEXT, + OUT measured_columns TEXT[] +) RETURNS SETOF record AS +$$ +------------------------------------------------------------------------------- +-- +-- Function: runs.get_partitioning_measures_by_id(1) +-- Returns measures for the given partitioning id +-- +-- Parameters: +-- i_partitioning_id - partitioning id we are asking the measures for +-- +-- Returns: +-- status - Status code +-- status_text - Status message +-- measure_name - Name of the measure +-- measured_columns - Array of columns associated with the measure +-- +-- Status codes: +-- 11 - OK +-- 41 - Partitioning not found +-- +------------------------------------------------------------------------------- +BEGIN + + PERFORM 1 FROM runs.partitionings WHERE id_partitioning = i_partitioning_id; + + IF NOT FOUND THEN + status := 41; + status_text := 'Partitioning not found'; + RETURN NEXT; + RETURN; + END IF; + + RETURN QUERY + SELECT 11, 'OK', MD.measure_name, MD.measured_columns + FROM runs.measure_definitions AS MD + WHERE MD.fk_partitioning = i_partitioning_id; + + RETURN; + +END; +$$ +LANGUAGE plpgsql volatile SECURITY DEFINER; + +ALTER FUNCTION runs.get_partitioning_measures_by_id(BIGINT) OWNER TO atum_owner; +GRANT EXECUTE ON FUNCTION runs.get_partitioning_measures_by_id(BIGINT) TO atum_user; diff --git a/database/src/test/scala/za/co/absa/atum/database/runs/GetPartitioningMeasuresByIdV2IntegrationTests.scala b/database/src/test/scala/za/co/absa/atum/database/runs/GetPartitioningMeasuresByIdV2IntegrationTests.scala new file mode 100644 index 000000000..15e4fa75a --- /dev/null +++ b/database/src/test/scala/za/co/absa/atum/database/runs/GetPartitioningMeasuresByIdV2IntegrationTests.scala @@ -0,0 +1,95 @@ +package za.co.absa.atum.database.runs + +import za.co.absa.balta.DBTestSuite +import za.co.absa.balta.classes.JsonBString +import za.co.absa.balta.classes.setter.CustomDBType + +class GetPartitioningMeasuresByIdV2IntegrationTests extends DBTestSuite { + private val fncGetPartitioningMeasuresById = "runs.get_partitioning_measures_by_id" + + private val partitioning: JsonBString = JsonBString( + """ + |{ + | "version": 1, + | "keys": ["key1", "key3", "key2", "key4"], + | "keysToValues": { + | "key1": "valueX", + | "key2": "valueY", + | "key3": "valueZ", + | "key4": "valueA" + | } + |} + |""".stripMargin + ) + + test("Get partitioning measures by id should return partitioning measures 
for partitioning with measures") { + + table("runs.partitionings").insert( + add("partitioning", partitioning) + .add("created_by", "Thomas") + ) + + val fkPartitioning: Long = table("runs.partitionings") + .fieldValue("partitioning", partitioning, "id_partitioning").get.get + + table("runs.measure_definitions").insert( + add("fk_partitioning", fkPartitioning) + .add("created_by", "Thomas") + .add("measure_name", "measure1") + .add("measured_columns", CustomDBType("""{"col1"}""", "TEXT[]")) + ) + + table("runs.measure_definitions").insert( + add("fk_partitioning", fkPartitioning) + .add("created_by", "Thomas") + .add("measure_name", "measure2") + .add("measured_columns", CustomDBType("""{"col2"}""", "TEXT[]")) + ) + + function(fncGetPartitioningMeasuresById) + .setParam("i_partitioning_id", fkPartitioning) + .execute { queryResult => + val results = queryResult.next() + assert(results.getInt("status").contains(11)) + assert(results.getString("status_text").contains("OK")) + assert(results.getString("measure_name").contains("measure1")) + assert(results.getArray[String]("measured_columns").map(_.toSeq).contains(Seq("col1"))) + + val results2 = queryResult.next() + assert(results2.getInt("status").contains(11)) + assert(results2.getString("status_text").contains("OK")) + assert(results2.getString("measure_name").contains("measure2")) + assert(results2.getArray[String]("measured_columns").map(_.toSeq).contains(Seq("col2"))) + } + } + + test("Get partitioning measures by id should return error for partitioning without measures") { + + table("runs.partitionings").insert( + add("partitioning", partitioning) + .add("created_by", "Thomas") + ) + + val fkPartitioning: Long = table("runs.partitionings") + .fieldValue("partitioning", partitioning, "id_partitioning").get.get + + function(fncGetPartitioningMeasuresById) + .setParam(fkPartitioning) + .execute { queryResult => + assert(!queryResult.hasNext) + } + } + + test("Get partitioning measures by id should return an error for non-existing partitioning") { + + function(fncGetPartitioningMeasuresById) + .setParam(999) + .execute { queryResult => + val results = queryResult.next() + assert(results.getInt("status").contains(41)) + assert(results.getString("status_text").contains("Partitioning not found")) + assert(!queryResult.hasNext) // checking no more records are returned. 
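+        // For reference, a sketch of what this test exercises at the SQL level (assuming direct access
+        // to the same test database): SELECT * FROM runs.get_partitioning_measures_by_id(999);
+        // is expected to return exactly one row with status = 41 and status_text = 'Partitioning not found',
+        // with NULL measure_name and measured_columns, which is what the asserts above verify through balta.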
+ } + } + +} diff --git a/server/src/main/scala/za/co/absa/atum/server/Main.scala b/server/src/main/scala/za/co/absa/atum/server/Main.scala index 26498ab89..01360ea69 100644 --- a/server/src/main/scala/za/co/absa/atum/server/Main.scala +++ b/server/src/main/scala/za/co/absa/atum/server/Main.scala @@ -52,6 +52,7 @@ object Main extends ZIOAppDefault with Server { FlowRepositoryImpl.layer, CreatePartitioningIfNotExists.layer, GetPartitioningMeasures.layer, + GetPartitioningMeasuresById.layer, GetPartitioningAdditionalData.layer, GetPartitioningAdditionalDataV2.layer, CreateOrUpdateAdditionalData.layer, diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala index 2d27efa3b..12f505c35 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningController.scala @@ -47,4 +47,8 @@ trait PartitioningController { def getPartitioningV2(partitioningId: Long): IO[ErrorResponse, SingleSuccessResponse[PartitioningWithIdDTO]] + + def getPartitioningMeasuresV2( + partitioningId: Long + ): IO[ErrorResponse, MultiSuccessResponse[MeasureDTO]] } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala index 7b08e3bef..998d422b3 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/controller/PartitioningControllerImpl.scala @@ -94,6 +94,17 @@ class PartitioningControllerImpl(partitioningService: PartitioningService) ) ) } + + override def getPartitioningMeasuresV2( + partitioningId: Long + ): IO[ErrorResponse, MultiSuccessResponse[MeasureDTO]] = { + mapToMultiSuccessResponse( + serviceCall[Seq[MeasureDTO], Seq[MeasureDTO]]( + partitioningService.getPartitioningMeasuresById(partitioningId) + ) + ) + } + } object PartitioningControllerImpl { diff --git a/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresById.scala b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresById.scala new file mode 100644 index 000000000..5fc9b198b --- /dev/null +++ b/server/src/main/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresById.scala @@ -0,0 +1,46 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package za.co.absa.atum.server.api.database.runs.functions + +import doobie.implicits.toSqlInterpolator +import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.atum.server.api.database.runs.Runs +import za.co.absa.db.fadb.DBSchema +import za.co.absa.db.fadb.doobie.DoobieEngine +import za.co.absa.db.fadb.doobie.DoobieFunction.DoobieMultipleResultFunctionWithAggStatus +import za.co.absa.db.fadb.status.aggregation.implementations.ByFirstErrorStatusAggregator +import za.co.absa.db.fadb.status.handling.implementations.StandardStatusHandling +import zio._ +import za.co.absa.atum.server.model.MeasureFromDB + +import za.co.absa.atum.server.api.database.DoobieImplicits.Sequence.get + +class GetPartitioningMeasuresById(implicit schema: DBSchema, dbEngine: DoobieEngine[Task]) + extends DoobieMultipleResultFunctionWithAggStatus[Long, MeasureFromDB, Task](values => + Seq(fr"${values}") + ) with StandardStatusHandling with ByFirstErrorStatusAggregator { + + override def fieldsToSelect: Seq[String] = super.fieldsToSelect ++ Seq("measure_name", "measured_columns") +} + +object GetPartitioningMeasuresById { + val layer: URLayer[PostgresDatabaseProvider, GetPartitioningMeasuresById] = ZLayer { + for { + dbProvider <- ZIO.service[PostgresDatabaseProvider] + } yield new GetPartitioningMeasuresById()(Runs, dbProvider.dbEngine) + } +} diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala index daa0a684f..b0f7e507e 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Endpoints.scala @@ -128,6 +128,15 @@ trait Endpoints extends BaseEndpoints { .errorOutVariantPrepend(notFoundErrorOneOfVariant) } + protected val getPartitioningMeasuresEndpointV2 + : PublicEndpoint[Long, ErrorResponse, MultiSuccessResponse[MeasureDTO], Any] = { + apiV2.get + .in(V2Paths.Partitionings / path[Long]("partitioningId") / V2Paths.Measures) + .out(statusCode(StatusCode.Ok)) + .out(jsonBody[MultiSuccessResponse[MeasureDTO]]) + .errorOutVariantPrepend(notFoundErrorOneOfVariant) + } + protected val zioMetricsEndpoint: PublicEndpoint[Unit, Unit, String, Any] = { endpoint.get.in(ZioMetrics).out(stringBody) } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala index 1177e36d1..01e1ed6ec 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/http/Routes.scala @@ -25,7 +25,7 @@ import sttp.tapir.server.http4s.ztapir.ZHttp4sServerInterpreter import sttp.tapir.server.interceptor.metrics.MetricsRequestInterceptor import sttp.tapir.swagger.bundle.SwaggerInterpreter import sttp.tapir.ztapir._ -import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataPatchDTO, CheckpointDTO, CheckpointV2DTO} +import za.co.absa.atum.model.dto.{AdditionalDataDTO, AdditionalDataPatchDTO, CheckpointV2DTO} import za.co.absa.atum.server.Constants.{SwaggerApiName, SwaggerApiVersion} import za.co.absa.atum.server.api.controller.{CheckpointController, FlowController, PartitioningController} import za.co.absa.atum.server.api.http.ApiPaths.V2Paths @@ -85,6 +85,7 @@ trait Routes extends Endpoints with ServerOptions { createServerEndpoint(getPartitioningCheckpointsEndpointV2, PartitioningController.getPartitioningCheckpointsV2), 
createServerEndpoint(getFlowCheckpointsEndpointV2, FlowController.getFlowCheckpointsV2), createServerEndpoint(getPartitioningEndpointV2, PartitioningController.getPartitioningV2), + createServerEndpoint(getPartitioningMeasuresEndpointV2, PartitioningController.getPartitioningMeasuresV2), createServerEndpoint(healthEndpoint, (_: Unit) => ZIO.unit) ) ZHttp4sServerInterpreter[HttpEnv.Env](http4sServerOptions(metricsInterceptorOption)).from(endpoints).toRoutes @@ -102,7 +103,8 @@ trait Routes extends Endpoints with ServerOptions { patchPartitioningAdditionalDataEndpointV2, getPartitioningCheckpointsEndpointV2, getPartitioningCheckpointEndpointV2, - getFlowCheckpointsEndpointV2 + getFlowCheckpointsEndpointV2, + getPartitioningMeasuresEndpointV2 ) ZHttp4sServerInterpreter[HttpEnv.Env](http4sServerOptions(None)) .from(SwaggerInterpreter().fromEndpoints[HttpEnv.F](endpoints, SwaggerApiName, SwaggerApiVersion)) diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala index 1e8cc84d7..14dbfc387 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepository.scala @@ -43,4 +43,6 @@ trait PartitioningRepository { def getPartitioning(partitioningId: Long): IO[DatabaseError, PartitioningWithIdDTO] + + def getPartitioningMeasuresById(partitioningId: Long): IO[DatabaseError, Seq[MeasureDTO]] } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala index 63788ea1a..3a275a363 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryImpl.scala @@ -32,7 +32,8 @@ class PartitioningRepositoryImpl( createOrUpdateAdditionalDataFn: CreateOrUpdateAdditionalData, getPartitioningCheckpointsFn: GetPartitioningCheckpoints, getPartitioningByIdFn: GetPartitioningById, - getPartitioningAdditionalDataV2Fn: GetPartitioningAdditionalDataV2 + getPartitioningAdditionalDataV2Fn: GetPartitioningAdditionalDataV2, + getPartitioningMeasuresByIdFn: GetPartitioningMeasuresById ) extends PartitioningRepository with BaseRepository { @@ -98,6 +99,14 @@ class PartitioningRepositoryImpl( } } + + override def getPartitioningMeasuresById(partitioningId: Long): IO[DatabaseError, Seq[MeasureDTO]] = { + dbMultipleResultCallWithAggregatedStatus(getPartitioningMeasuresByIdFn(partitioningId), "getPartitioningMeasures") + .map(_.map { case MeasureFromDB(measureName, measuredColumns) => + MeasureDTO(measureName.get, measuredColumns.get) + }) + } + } object PartitioningRepositoryImpl { @@ -108,8 +117,8 @@ object PartitioningRepositoryImpl { with CreateOrUpdateAdditionalData with GetPartitioningCheckpoints with GetPartitioningAdditionalDataV2 - with GetPartitioningCheckpoints - with GetPartitioningById, + with GetPartitioningById + with GetPartitioningMeasuresById, PartitioningRepository ] = ZLayer { for { @@ -120,6 +129,7 @@ object PartitioningRepositoryImpl { getPartitioningCheckpoints <- ZIO.service[GetPartitioningCheckpoints] getPartitioningById <- ZIO.service[GetPartitioningById] getPartitioningAdditionalDataV2 <- ZIO.service[GetPartitioningAdditionalDataV2] + getPartitioningMeasuresV2 <- 
ZIO.service[GetPartitioningMeasuresById] } yield new PartitioningRepositoryImpl( createPartitioningIfNotExists, getPartitioningMeasures, getPartitioningAdditionalData, createOrUpdateAdditionalData, getPartitioningCheckpoints, getPartitioningById, - getPartitioningAdditionalDataV2 + getPartitioningAdditionalDataV2, + getPartitioningMeasuresV2 ) } } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala index ab699997f..212bdeb89 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningService.scala @@ -40,4 +40,5 @@ trait PartitioningService { def getPartitioning(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] + def getPartitioningMeasuresById(partitioningId: Long): IO[ServiceError, Seq[MeasureDTO]] } diff --git a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala index 1925568b7..fc3d55e81 100644 --- a/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala +++ b/server/src/main/scala/za/co/absa/atum/server/api/service/PartitioningServiceImpl.scala @@ -87,6 +87,14 @@ class PartitioningServiceImpl(partitioningRepository: PartitioningRepository) override def getPartitioning(partitioningId: Long): IO[ServiceError, PartitioningWithIdDTO] = { repositoryCall(partitioningRepository.getPartitioning(partitioningId), "getPartitioning") } + + override def getPartitioningMeasuresById(partitioningId: Long): IO[ServiceError, Seq[MeasureDTO]] = { + repositoryCall( + partitioningRepository.getPartitioningMeasuresById(partitioningId), + "getPartitioningMeasuresById" + ) + } + } object PartitioningServiceImpl { diff --git a/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala new file mode 100644 index 000000000..b6f415332 --- /dev/null +++ b/server/src/test/scala/za/co/absa/atum/server/api/database/runs/functions/GetPartitioningMeasuresByIdV2IntegrationTests.scala @@ -0,0 +1,32 @@ +package za.co.absa.atum.server.api.database.runs.functions + +import za.co.absa.atum.server.ConfigProviderTest +import za.co.absa.atum.server.api.TestTransactorProvider +import za.co.absa.atum.server.api.database.PostgresDatabaseProvider +import za.co.absa.db.fadb.exceptions.DataNotFoundException +import zio.interop.catz.asyncInstance +import za.co.absa.db.fadb.status.FunctionStatus +import zio.test.{Spec, TestEnvironment, assertTrue} +import zio.{Scope, ZIO} + +object GetPartitioningMeasuresByIdV2IntegrationTests extends ConfigProviderTest { + + override def spec: Spec[TestEnvironment with Scope, Any] = { + + suite("GetPartitioningMeasuresByIdSuite")( + test("Returns DataNotFoundException for a partitioning id that does not exist") { + val partitioningID: Long = 1L + + for { + getPartitioningMeasuresV2 <- ZIO.service[GetPartitioningMeasuresById] + result <- getPartitioningMeasuresV2(partitioningID) + } yield assertTrue(result == Left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) + } + ).provide( + GetPartitioningMeasuresById.layer, + PostgresDatabaseProvider.layer, +
TestTransactorProvider.layerWithRollback + ) + } + +} diff --git a/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningMeasuresV2EndpointUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningMeasuresV2EndpointUnitTests.scala new file mode 100644 index 000000000..0cc6b6d4c --- /dev/null +++ b/server/src/test/scala/za/co/absa/atum/server/api/http/GetPartitioningMeasuresV2EndpointUnitTests.scala @@ -0,0 +1,93 @@ +/* + * Copyright 2021 ABSA Group Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package za.co.absa.atum.server.api.http + +import org.mockito.Mockito.{mock, when} +import sttp.client3.testing.SttpBackendStub +import sttp.client3._ +import sttp.tapir.server.stub.TapirStubInterpreter +import sttp.tapir.ztapir.{RIOMonadError, RichZEndpoint} +import sttp.client3.circe._ +import sttp.model.StatusCode +import za.co.absa.atum.model.dto.MeasureDTO +import za.co.absa.atum.server.api.TestData +import za.co.absa.atum.server.api.controller.PartitioningController +import za.co.absa.atum.server.model.{GeneralErrorResponse, NotFoundErrorResponse} +import za.co.absa.atum.server.model.SuccessResponse.MultiSuccessResponse +import zio._ +import zio.test.{Spec, TestEnvironment, ZIOSpecDefault, assertTrue} + +object GetPartitioningMeasuresV2EndpointUnitTests extends ZIOSpecDefault with Endpoints with TestData { + + private val partitioningControllerMock = mock(classOf[PartitioningController]) + + when(partitioningControllerMock.getPartitioningMeasuresV2(1L)) + .thenReturn(ZIO.succeed(MultiSuccessResponse(Seq(measureDTO1, measureDTO2), uuid1))) + when(partitioningControllerMock.getPartitioningMeasuresV2(2L)) + .thenReturn(ZIO.fail(GeneralErrorResponse("error"))) + when(partitioningControllerMock.getPartitioningMeasuresV2(3L)) + .thenReturn(ZIO.fail(NotFoundErrorResponse("boom!"))) + + private val partitioningControllerMockLayer = ZLayer.succeed(partitioningControllerMock) + + private val getPartitioningMeasuresServerEndpoint = + getPartitioningMeasuresEndpointV2.zServerLogic({partitioningId: Long => + PartitioningController.getPartitioningMeasuresV2(partitioningId) + }) + + def spec: Spec[TestEnvironment with Scope, Any] = { + val backendStub = TapirStubInterpreter(SttpBackendStub.apply(new RIOMonadError[PartitioningController])) + .whenServerEndpoint(getPartitioningMeasuresServerEndpoint) + .thenRunLogic() + .backend() + + def createBasicRequest(id: Long): RequestT[Identity, Either[ResponseException[String, io.circe.Error], MultiSuccessResponse[MeasureDTO]], Any] = { + basicRequest + .get(uri"https://test.com/api/v2/partitionings/$id/measures") + .response(asJson[MultiSuccessResponse[MeasureDTO]]) + } + + suite("GetPartitioningMeasuresV2EndpointSuite")( + test("Returns expected MeasureDTO") { + for { + response <- createBasicRequest(1L).send(backendStub) + body <- ZIO.fromEither(response.body) + statusCode = response.code + } yield { + assertTrue(body.data == MultiSuccessResponse(Seq(measureDTO1, measureDTO2), uuid1).data, 
statusCode == StatusCode.Ok) + } + }, + test("Returns expected general error") { + for { + response <- createBasicRequest(2L).send(backendStub) + statusCode = response.code + } yield { + assertTrue(statusCode == StatusCode.BadRequest) + } + }, + test("Returns expected not found error") { + for { + response <- createBasicRequest(3L).send(backendStub) + statusCode = response.code + } yield { + assertTrue(statusCode == StatusCode.NotFound) + } + } + ) + }.provide(partitioningControllerMockLayer) +} + diff --git a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala index 520a77792..4e2bec8e1 100644 --- a/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/repository/PartitioningRepositoryUnitTests.scala @@ -114,6 +114,19 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { private val getPartitioningAdditionalDataV2MockLayer = ZLayer.succeed(getPartitioningAdditionalDataV2Mock) + private val getPartitioningMeasuresV2Mock = mock(classOf[GetPartitioningMeasuresById]) + + when(getPartitioningMeasuresV2Mock.apply(1L)).thenReturn( + ZIO.right(Seq(Row(FunctionStatus(0, "success"), measureFromDB1), Row(FunctionStatus(0, "success"), measureFromDB2)))) + when(getPartitioningMeasuresV2Mock.apply(2L)) + .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(41, "Partitioning not found")))) + when(getPartitioningMeasuresV2Mock.apply(3L)) + .thenReturn(ZIO.left(DataNotFoundException(FunctionStatus(42, "Measures not found")))) + when(getPartitioningMeasuresV2Mock.apply(4L)).thenReturn(ZIO.fail(GeneralDatabaseError("boom!"))) + + private val getPartitioningMeasuresV2MockLayer = ZLayer.succeed(getPartitioningMeasuresV2Mock) + + override def spec: Spec[TestEnvironment with Scope, Any] = { suite("PartitioningRepositorySuite")( @@ -241,6 +254,28 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { failsWithA[GeneralDatabaseError] ) } + ), + suite("GetPartitioningMeasuresByIdSuite")( + test("Returns expected Seq") { + for { + result <- PartitioningRepository.getPartitioningMeasuresById(1L) + } yield assertTrue(result == Seq(measureDTO1, measureDTO2)) + }, + test("Returns expected NotFoundDatabaseError") { + assertZIO(PartitioningRepository.getPartitioningMeasuresById(2L).exit)( + failsWithA[NotFoundDatabaseError] + ) + }, + test("Returns expected NotFoundDatabaseError") { + assertZIO(PartitioningRepository.getPartitioningMeasuresById(3L).exit)( + failsWithA[NotFoundDatabaseError] + ) + }, + test("Returns expected GeneralDatabaseError") { + assertZIO(PartitioningRepository.getPartitioningMeasuresById(4L).exit)( + failsWithA[GeneralDatabaseError] + ) + } ) ).provide( PartitioningRepositoryImpl.layer, @@ -250,7 +285,8 @@ object PartitioningRepositoryUnitTests extends ZIOSpecDefault with TestData { createOrUpdateAdditionalDataMockLayer, getPartitioningCheckpointsMockLayer, getPartitioningByIdMockLayer, - getPartitioningAdditionalDataV2MockLayer + getPartitioningAdditionalDataV2MockLayer, + getPartitioningMeasuresV2MockLayer ) } diff --git a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala index c297ad18a..b7db40174 100644 --- 
a/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala +++ b/server/src/test/scala/za/co/absa/atum/server/api/service/PartitioningServiceUnitTests.scala @@ -69,6 +69,13 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData { when(partitioningRepositoryMock.getPartitioning(8888L)) .thenReturn(ZIO.fail(NotFoundDatabaseError("Partitioning not found"))) + when(partitioningRepositoryMock.getPartitioningMeasuresById(1L)) + .thenReturn(ZIO.succeed(Seq(measureDTO1, measureDTO2))) + when(partitioningRepositoryMock.getPartitioningMeasuresById(2L)) + .thenReturn(ZIO.fail(NotFoundDatabaseError("boom!"))) + when(partitioningRepositoryMock.getPartitioningMeasuresById(3L)) + .thenReturn(ZIO.fail(GeneralDatabaseError("boom!"))) + private val partitioningRepositoryMockLayer = ZLayer.succeed(partitioningRepositoryMock) override def spec: Spec[TestEnvironment with Scope, Any] = { @@ -184,6 +191,23 @@ object PartitioningServiceUnitTests extends ZIOSpecDefault with TestData { result == Exit.fail(NotFoundServiceError("Failed to perform 'getPartitioning': Partitioning not found")) ) } + ), + suite("GetPartitioningMeasuresByIdSuite")( + test("Returns expected Seq[MeasureDTO]") { + for { + result <- PartitioningService.getPartitioningMeasuresById(1L) + } yield assertTrue(result == Seq(measureDTO1, measureDTO2)) + }, + test("Returns expected NotFoundServiceError") { + assertZIO(PartitioningService.getPartitioningMeasuresById(2L).exit)( + failsWithA[NotFoundServiceError] + ) + }, + test("Returns expected GeneralServiceError") { + assertZIO(PartitioningService.getPartitioningMeasuresById(3L).exit)( + failsWithA[GeneralServiceError] + ) + } ) ).provide( PartitioningServiceImpl.layer,
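For context, a minimal client-side sketch of how the new GET /api/v2/partitionings/{partitioningId}/measures endpoint could be consumed. This is not part of the PR: the object name, the localhost:8080 base URL, the synchronous HttpURLConnectionBackend, and the presence of implicit circe decoders for MultiSuccessResponse[MeasureDTO] are assumptions; the path and response type mirror getPartitioningMeasuresEndpointV2 and the endpoint unit tests above.

    import sttp.client3._
    import sttp.client3.circe._
    import za.co.absa.atum.model.dto.MeasureDTO
    import za.co.absa.atum.server.model.SuccessResponse.MultiSuccessResponse

    object GetPartitioningMeasuresClientSketch extends App {
      val backend = HttpURLConnectionBackend()
      val partitioningId = 1L // hypothetical id

      val response = basicRequest
        .get(uri"http://localhost:8080/api/v2/partitionings/$partitioningId/measures")
        .response(asJson[MultiSuccessResponse[MeasureDTO]])
        .send(backend)

      response.body match {
        case Right(multiResponse) =>
          // Each MeasureDTO pairs a measure name with its measured columns, as produced by
          // runs.get_partitioning_measures_by_id and mapped through the repository and service layers.
          multiResponse.data.foreach(println)
        case Left(error) =>
          // A non-existing partitioning id is expected to surface as 404 Not Found (DB status 41).
          println(s"Request failed: $error")
      }
    }

The error branch corresponds to the notFoundErrorOneOfVariant wired into the endpoint, which the "Returns expected not found error" unit test covers with the stubbed controller.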