diff --git a/.github/workflows/java-client.yml b/.github/workflows/java-client.yml index db45a21..9975912 100644 --- a/.github/workflows/java-client.yml +++ b/.github/workflows/java-client.yml @@ -35,4 +35,4 @@ jobs: run: chmod +x gradlew - name: Build with Gradle - run: ./gradlew :java-client:build + run: ./gradlew :java-client:spotlessCheck :java-client:build diff --git a/.github/workflows/record-store.yml b/.github/workflows/record-store.yml index d79672f..7151c6c 100644 --- a/.github/workflows/record-store.yml +++ b/.github/workflows/record-store.yml @@ -35,4 +35,4 @@ jobs: run: chmod +x gradlew - name: Build with Gradle - run: ./gradlew :record-store:build + run: ./gradlew :record-store:spotlessCheck :record-store:build diff --git a/.github/workflows/testcontainers-foundationdb.yml b/.github/workflows/testcontainers-foundationdb.yml index 4c61693..3c57b0b 100644 --- a/.github/workflows/testcontainers-foundationdb.yml +++ b/.github/workflows/testcontainers-foundationdb.yml @@ -26,4 +26,4 @@ jobs: run: chmod +x gradlew - name: Build with Gradle - run: ./gradlew :testcontainers-foundationdb:build + run: ./gradlew :testcontainers-foundationdb:spotlessCheck :testcontainers-foundationdb:build diff --git a/HEADER b/HEADER deleted file mode 100644 index d7ec977..0000000 --- a/HEADER +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020 Pierre Zemb - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/README.md b/README.md index 7056efb..f7f9f88 100644 --- a/README.md +++ b/README.md @@ -79,3 +79,9 @@ To run your application: ``` ./gradlew :record-store:run ``` + +To format: +```bash +gradle :record-store:spotlessApply :java-client:spotlessApply :testcontainers-foundationdb:spotlessApply +gradle :record-store:spotlessCheck :java-client:spotlessCheck :testcontainers-foundationdb:spotlessCheck +``` diff --git a/build.gradle b/build.gradle index e3529dd..63885c5 100644 --- a/build.gradle +++ b/build.gradle @@ -1,15 +1,18 @@ plugins { - id "com.github.hierynomus.license" version"0.15.0" + id "com.diffplug.spotless" version "5.8.2" } allprojects { - apply plugin: 'com.github.hierynomus.license' + apply plugin: 'com.diffplug.spotless' repositories { mavenCentral() } - license { - header rootProject.file('HEADER') - include "**/*.java" + spotless { + java { + removeUnusedImports() + googleJavaFormat() + licenseHeaderFile "$rootDir/license.header" + } } } ext { diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html index 388780d..33cc7ae 100644 --- a/docs/_layouts/default.html +++ b/docs/_layouts/default.html @@ -34,6 +34,8 @@
Examples
+ Managed schema +
Q&A

diff --git a/docs/index.md b/docs/index.md index 73d4b2b..c04de13 100644 --- a/docs/index.md +++ b/docs/index.md @@ -18,6 +18,9 @@ A light, multi-model, user-defined place for your data. * [gRPC](https://grpc.io) * *very experimental* [GraphQL](https://graphql.org) +* **Battery included** + * Additionally to `RecordSpaces`, the Record-Store is offering dedicated developer experiences, such as *Key-Value* + * **Scalable** We are based on the same tech behind [CloudKit](https://www.foundationdb.org/files/record-layer-paper.pdf) called the [Record Layer](https://github.com/foundationdb/fdb-record-layer/). CloudKit uses the Record Layer to host billions of independent databases. The name of this project itself is a tribute to the Record Layer as we are exposing the layer within a gRPC interface. * **Transactional** We are running on top of [FoundationDB](https://www.foundationdb.org/). FoundationDB gives you the power of ACID transactions in a distributed database. diff --git a/docs/managed-schema.md b/docs/managed-schema.md new file mode 100644 index 0000000..b4cb388 --- /dev/null +++ b/docs/managed-schema.md @@ -0,0 +1,33 @@ +--- +title: Managed schemas +--- + +In addition to the `recordSpaces` which allow you to define your own schemas, you also have the possibility to use `managedSchemas`. They are offering their own APIs and gRPC endpoints with a fixed schemas provided by the Record-Store. + +## Managed KeyValue + +Do you need a Key/Value experience? 
You can easily use this dedicated gRPC endpoint: + +```grpc +service ManagedKV { + rpc put(KeyValue) returns (EmptyResponse); + rpc delete(DeleteRequest) returns (EmptyResponse); + rpc scan(ScanRequest) returns (stream KeyValue); +} + +message EmptyResponse {} + +message DeleteRequest { + bytes key_to_delete = 1; +} + +message ScanRequest { + bytes start_key = 1; + bytes end_key = 2; +} + +message KeyValue { + bytes key = 1; + bytes value = 2; +} +``` diff --git a/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordField.java b/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordField.java index 066ec28..5422517 100644 --- a/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordField.java +++ b/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordField.java @@ -16,13 +16,11 @@ package fr.pierrezemb.recordstore.client; import fr.pierrezemb.recordstore.proto.RecordStoreProtocol; -import org.jetbrains.annotations.NotNull; - import javax.annotation.Nonnull; +import org.jetbrains.annotations.NotNull; public class RecordField { - @Nonnull - private final String fieldName; + @Nonnull private final String fieldName; public RecordField(@NotNull String fieldName) { this.fieldName = fieldName; @@ -92,16 +90,19 @@ public RecordStoreProtocol.QueryFilterNode greaterThanOrEquals(float value) { return createQueryFilter(RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS, value); } - private RecordStoreProtocol.QueryFilterNode createQueryFilter(RecordStoreProtocol.FilterOperation fieldOperation, Object value) { + private RecordStoreProtocol.QueryFilterNode createQueryFilter( + RecordStoreProtocol.FilterOperation fieldOperation, Object value) { return RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(createQueryFilterFieldNode(fieldOperation, value)) - .build(); + .setFieldNode(createQueryFilterFieldNode(fieldOperation, value)) + .build(); } - private RecordStoreProtocol.QueryFilterFieldNode 
createQueryFilterFieldNode(RecordStoreProtocol.FilterOperation fieldOperation, Object value) { - RecordStoreProtocol.QueryFilterFieldNode.Builder builder = RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField(fieldName) - .setOperation(fieldOperation); + private RecordStoreProtocol.QueryFilterFieldNode createQueryFilterFieldNode( + RecordStoreProtocol.FilterOperation fieldOperation, Object value) { + RecordStoreProtocol.QueryFilterFieldNode.Builder builder = + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField(fieldName) + .setOperation(fieldOperation); if (value instanceof Integer) { builder.setInt32Value((Integer) value); @@ -121,5 +122,4 @@ private RecordStoreProtocol.QueryFilterFieldNode createQueryFilterFieldNode(Reco return builder.build(); } - } diff --git a/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordStoreClient.java b/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordStoreClient.java index 52950f9..0333b4b 100644 --- a/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordStoreClient.java +++ b/java-client/src/main/java/fr/pierrezemb/recordstore/client/RecordStoreClient.java @@ -25,7 +25,6 @@ import fr.pierrezemb.recordstore.proto.SchemaServiceGrpc; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; - import java.util.Iterator; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -43,7 +42,8 @@ public class RecordStoreClient { private final AdminServiceGrpc.AdminServiceFutureStub asyncAdminStub; private final RecordServiceGrpc.RecordServiceBlockingStub syncRecordStub; - private RecordStoreClient(String tenant, String recordSpace, String address, String token) throws InterruptedException, ExecutionException, TimeoutException { + private RecordStoreClient(String tenant, String recordSpace, String address, String token) + throws InterruptedException, ExecutionException, TimeoutException { this.tenant = tenant; this.recordSpace = 
recordSpace; this.address = address; @@ -70,7 +70,8 @@ public ListenableFuture ping() { return this.asyncAdminStub.ping(RecordStoreProtocol.EmptyRequest.newBuilder().build()); } - public ListenableFuture upsertSchema(RecordStoreProtocol.UpsertSchemaRequest request) { + public ListenableFuture upsertSchema( + RecordStoreProtocol.UpsertSchemaRequest request) { return this.asyncSchemaStub.upsert(request); } @@ -78,18 +79,21 @@ public ListenableFuture putRecord(Message rec return this.putRecord(record.getClass().getSimpleName(), record.toByteArray()); } - public ListenableFuture putRecord(String recordTypeName, byte[] message) { - return this.asyncRecordStub.put(RecordStoreProtocol.PutRecordRequest.newBuilder() - .setMessage(ByteString.copyFrom(message)) - .setRecordTypeName(recordTypeName) - .build()); + public ListenableFuture putRecord( + String recordTypeName, byte[] message) { + return this.asyncRecordStub.put( + RecordStoreProtocol.PutRecordRequest.newBuilder() + .setMessage(ByteString.copyFrom(message)) + .setRecordTypeName(recordTypeName) + .build()); } public ListenableFuture getStats() { return asyncSchemaStub.stat(RecordStoreProtocol.StatRequest.newBuilder().build()); } - public Iterator queryRecords(RecordStoreProtocol.QueryRequest request) { + public Iterator queryRecords( + RecordStoreProtocol.QueryRequest request) { return syncRecordStub.query(request); } @@ -120,7 +124,8 @@ public Builder withToken(String token) { return this; } - public RecordStoreClient connect() throws InterruptedException, ExecutionException, TimeoutException { + public RecordStoreClient connect() + throws InterruptedException, ExecutionException, TimeoutException { return new RecordStoreClient(tenant, recordSpace, address, token); } } diff --git a/java-client/src/main/java/fr/pierrezemb/recordstore/client/SchemaUtils.java b/java-client/src/main/java/fr/pierrezemb/recordstore/client/SchemaUtils.java index becfc02..68c3d8f 100644 --- 
a/java-client/src/main/java/fr/pierrezemb/recordstore/client/SchemaUtils.java +++ b/java-client/src/main/java/fr/pierrezemb/recordstore/client/SchemaUtils.java @@ -19,72 +19,85 @@ import com.google.protobuf.Descriptors; import fr.pierrezemb.recordstore.proto.RecordStoreProtocol; import fr.pierrezemb.recordstore.utils.protobuf.ProtobufReflectionUtil; - import java.util.Collections; import java.util.List; -/** - * A Utils class that can be used to easily generate UpsertSchema requests - */ +/** A Utils class that can be used to easily generate UpsertSchema requests */ public class SchemaUtils { public static RecordStoreProtocol.UpsertSchemaRequest createSchemaRequest( - Descriptors.Descriptor descriptor, - List indexDefinitionList) { + Descriptors.Descriptor descriptor, + List indexDefinitionList) { DescriptorProtos.FileDescriptorSet dependencies = - ProtobufReflectionUtil.protoFileDescriptorSet(descriptor); + ProtobufReflectionUtil.protoFileDescriptorSet(descriptor); return RecordStoreProtocol.UpsertSchemaRequest.newBuilder() - .setSchema(dependencies) - .addAllRecordTypeIndexDefinitions(indexDefinitionList) - .build(); + .setSchema(dependencies) + .addAllRecordTypeIndexDefinitions(indexDefinitionList) + .build(); } public static RecordStoreProtocol.UpsertSchemaRequest createSchemaRequest( - Descriptors.Descriptor descriptor, - String recordTypeName, - String primaryKey, - String indexField, RecordStoreProtocol.IndexType indexType) { - return createSchemaRequest(descriptor, Collections.singletonList( - createIndex(recordTypeName, primaryKey, Collections.singletonList(createIndexDefinition(indexField, indexType))) - )); + Descriptors.Descriptor descriptor, + String recordTypeName, + String primaryKey, + String indexField, + RecordStoreProtocol.IndexType indexType) { + return createSchemaRequest( + descriptor, + Collections.singletonList( + createIndex( + recordTypeName, + primaryKey, + Collections.singletonList(createIndexDefinition(indexField, indexType))))); } 
public static RecordStoreProtocol.UpsertSchemaRequest createSchemaRequest( - Descriptors.Descriptor descriptor, - RecordStoreProtocol.RecordTypeIndexDefinition indexDefinition) { + Descriptors.Descriptor descriptor, + RecordStoreProtocol.RecordTypeIndexDefinition indexDefinition) { return createSchemaRequest(descriptor, Collections.singletonList(indexDefinition)); } - public static RecordStoreProtocol.RecordTypeIndexDefinition createIndex(String name, String primaryKeyField, RecordStoreProtocol.IndexDefinition indexDefinition) { - return createIndex(name, Collections.singletonList(primaryKeyField), Collections.singletonList(indexDefinition)); + public static RecordStoreProtocol.RecordTypeIndexDefinition createIndex( + String name, String primaryKeyField, RecordStoreProtocol.IndexDefinition indexDefinition) { + return createIndex( + name, + Collections.singletonList(primaryKeyField), + Collections.singletonList(indexDefinition)); } - public static RecordStoreProtocol.RecordTypeIndexDefinition createIndex(String name, String primaryKeyField, List indexDefinitions) { + public static RecordStoreProtocol.RecordTypeIndexDefinition createIndex( + String name, + String primaryKeyField, + List indexDefinitions) { return createIndex(name, Collections.singletonList(primaryKeyField), indexDefinitions); } - - public static RecordStoreProtocol.RecordTypeIndexDefinition createIndex(String name, List primaryKeyFields, List indexDefinitions) { + public static RecordStoreProtocol.RecordTypeIndexDefinition createIndex( + String name, + List primaryKeyFields, + List indexDefinitions) { return RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName(name) - .addAllIndexDefinitions(indexDefinitions) - .addAllPrimaryKeyFields(primaryKeyFields) - .build(); + .setName(name) + .addAllIndexDefinitions(indexDefinitions) + .addAllPrimaryKeyFields(primaryKeyFields) + .build(); } - public static RecordStoreProtocol.IndexDefinition createIndexDefinition(String field, 
RecordStoreProtocol.IndexType indexType) { + public static RecordStoreProtocol.IndexDefinition createIndexDefinition( + String field, RecordStoreProtocol.IndexType indexType) { return RecordStoreProtocol.IndexDefinition.newBuilder() - .setField(field) - .setIndexType(indexType) - .build(); + .setField(field) + .setIndexType(indexType) + .build(); } - public static RecordStoreProtocol.IndexDefinition createIndexDefinition(String field, RecordStoreProtocol.IndexType indexType, RecordStoreProtocol.FanType fanType) { + public static RecordStoreProtocol.IndexDefinition createIndexDefinition( + String field, RecordStoreProtocol.IndexType indexType, RecordStoreProtocol.FanType fanType) { return RecordStoreProtocol.IndexDefinition.newBuilder() - .setField(field) - .setIndexType(indexType) - .setFanType(fanType) - .build(); + .setField(field) + .setIndexType(indexType) + .setFanType(fanType) + .build(); } } diff --git a/java-client/src/test/java/fr/pierrezemb/recordstore/client/PortManager.java b/java-client/src/test/java/fr/pierrezemb/recordstore/client/PortManager.java index eb59392..7f8f664 100644 --- a/java-client/src/test/java/fr/pierrezemb/recordstore/client/PortManager.java +++ b/java-client/src/test/java/fr/pierrezemb/recordstore/client/PortManager.java @@ -26,7 +26,7 @@ public static synchronized int nextFreePort() { int port = nextPort++; try (ServerSocket ss = new ServerSocket(port)) { ss.close(); - //Give it some time to truly close the connection + // Give it some time to truly close the connection Thread.sleep(100); return port; } catch (Exception e) { diff --git a/java-client/src/test/java/fr/pierrezemb/recordstore/client/RecordStoreClientTest.java b/java-client/src/test/java/fr/pierrezemb/recordstore/client/RecordStoreClientTest.java index c249849..c894451 100644 --- a/java-client/src/test/java/fr/pierrezemb/recordstore/client/RecordStoreClientTest.java +++ b/java-client/src/test/java/fr/pierrezemb/recordstore/client/RecordStoreClientTest.java @@ -15,6 +15,9 
@@ */ package fr.pierrezemb.recordstore.client; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import com.google.protobuf.InvalidProtocolBufferException; import fr.pierrezemb.recordstore.Constants; import fr.pierrezemb.recordstore.GrpcVerticle; @@ -27,6 +30,10 @@ import io.vertx.core.json.JsonObject; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; +import java.io.File; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.ExecutionException; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Order; @@ -37,14 +44,6 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.testcontainers.containers.AbstractFDBContainer; -import java.io.File; -import java.util.Collections; -import java.util.Iterator; -import java.util.concurrent.ExecutionException; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - @ExtendWith(VertxExtension.class) @TestInstance(TestInstance.Lifecycle.PER_CLASS) @TestMethodOrder(MethodOrderer.OrderAnnotation.class) @@ -59,29 +58,34 @@ void deploy_verticle(Vertx vertx, VertxTestContext testContext) { File clusterFile = container.getClusterFile(); - DeploymentOptions options = new DeploymentOptions() - .setConfig(new JsonObject() - .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) - .put(Constants.CONFIG_LOAD_DEMO, "USER") - .put(Constants.CONFIG_GRPC_LISTEN_PORT, port)); + DeploymentOptions options = + new DeploymentOptions() + .setConfig( + new JsonObject() + .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) + .put(Constants.CONFIG_LOAD_DEMO, "USER") + .put(Constants.CONFIG_GRPC_LISTEN_PORT, port)); BiscuitManager biscuitManager = new BiscuitManager(); - sealedBiscuit = biscuitManager.create(DatasetsLoader.DEFAULT_DEMO_TENANT, Collections.emptyList()); + sealedBiscuit = + 
biscuitManager.create(DatasetsLoader.DEFAULT_DEMO_TENANT, Collections.emptyList()); // deploy verticle - vertx.deployVerticle(new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); + vertx.deployVerticle( + new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); } @Test @Order(1) public void testCreateClient(Vertx vertx, VertxTestContext testContext) throws Exception { - recordStoreClient = new RecordStoreClient.Builder() - .withRecordSpace(this.getClass().getName()) - .withTenant(DatasetsLoader.DEFAULT_DEMO_TENANT) - .withToken(sealedBiscuit) - .withAddress("localhost:" + port) - .connect(); + recordStoreClient = + new RecordStoreClient.Builder() + .withRecordSpace(this.getClass().getName()) + .withTenant(DatasetsLoader.DEFAULT_DEMO_TENANT) + .withToken(sealedBiscuit) + .withAddress("localhost:" + port) + .connect(); recordStoreClient.ping().get(); testContext.completeNow(); @@ -89,15 +93,17 @@ public void testCreateClient(Vertx vertx, VertxTestContext testContext) throws E @Order(2) @RepeatedTest(3) - public void testUploadSchema(Vertx vertx, VertxTestContext testContext) throws ExecutionException, InterruptedException { - - RecordStoreProtocol.UpsertSchemaRequest request = SchemaUtils.createSchemaRequest( - DemoUserProto.User.getDescriptor(), // descriptor - DemoUserProto.User.class.getSimpleName(), // name of the recordType - "id", // primary key field - "name", // index field - RecordStoreProtocol.IndexType.VALUE // index type - ); + public void testUploadSchema(Vertx vertx, VertxTestContext testContext) + throws ExecutionException, InterruptedException { + + RecordStoreProtocol.UpsertSchemaRequest request = + SchemaUtils.createSchemaRequest( + DemoUserProto.User.getDescriptor(), // descriptor + DemoUserProto.User.class.getSimpleName(), // name of the recordType + "id", // primary key field + "name", // index field + RecordStoreProtocol.IndexType.VALUE // index type + ); 
recordStoreClient.upsertSchema(request).get(); @@ -106,13 +112,15 @@ public void testUploadSchema(Vertx vertx, VertxTestContext testContext) throws E @Order(3) @RepeatedTest(3) - public void testPut(Vertx vertx, VertxTestContext testContext) throws ExecutionException, InterruptedException { + public void testPut(Vertx vertx, VertxTestContext testContext) + throws ExecutionException, InterruptedException { - DemoUserProto.User record = DemoUserProto.User.newBuilder() - .setId(999) - .setName("Pierre Zemb") - .setEmail("pz@example.org") - .build(); + DemoUserProto.User record = + DemoUserProto.User.newBuilder() + .setId(999) + .setName("Pierre Zemb") + .setEmail("pz@example.org") + .build(); recordStoreClient.putRecord(record).get(); @@ -121,7 +129,8 @@ public void testPut(Vertx vertx, VertxTestContext testContext) throws ExecutionE @Test @Order(4) - public void testGetStats(Vertx vertx, VertxTestContext testContext) throws ExecutionException, InterruptedException { + public void testGetStats(Vertx vertx, VertxTestContext testContext) + throws ExecutionException, InterruptedException { RecordStoreProtocol.StatResponse stats = recordStoreClient.getStats().get(); assertEquals("bad count of records", 1, stats.getCount()); @@ -130,21 +139,23 @@ public void testGetStats(Vertx vertx, VertxTestContext testContext) throws Execu testContext.completeNow(); } - @Test @Order(5) - public void testQuery(Vertx vertx, VertxTestContext testContext) throws ExecutionException, InterruptedException, InvalidProtocolBufferException { + public void testQuery(Vertx vertx, VertxTestContext testContext) + throws ExecutionException, InterruptedException, InvalidProtocolBufferException { - RecordStoreProtocol.QueryRequest request = RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName(DemoUserProto.User.class.getSimpleName()) - .setFilter(RecordQuery.field("id").lessThan(1000L)) - .setResultLimit(1) - .build(); + RecordStoreProtocol.QueryRequest request = + 
RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName(DemoUserProto.User.class.getSimpleName()) + .setFilter(RecordQuery.field("id").lessThan(1000L)) + .setResultLimit(1) + .build(); Iterator results = recordStoreClient.queryRecords(request); assertTrue("bad length of results", results.hasNext()); - DemoUserProto.User response = DemoUserProto.User.parseFrom(results.next().getRecord().toByteArray()); + DemoUserProto.User response = + DemoUserProto.User.parseFrom(results.next().getRecord().toByteArray()); assertEquals("bad id", 999, response.getId()); assertEquals("bad name", "Pierre Zemb", response.getName()); assertEquals("bad mail", "pz@example.org", response.getEmail()); diff --git a/license.header b/license.header new file mode 100644 index 0000000..49d67e2 --- /dev/null +++ b/license.header @@ -0,0 +1,15 @@ +/** + * Copyright 2020 Pierre Zemb + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/Constants.java b/record-store/src/main/java/fr/pierrezemb/recordstore/Constants.java index c3d8322..77260c0 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/Constants.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/Constants.java @@ -17,7 +17,8 @@ public class Constants { public static final String CONFIG_ENCRYPTION_KEY_DEFAULT = "6B58703273357638792F423F4528482B"; - public static final String CONFIG_BISCUIT_KEY_DEFAULT = "3A8621F1847F19D6DAEAB5465CE8D3908B91C66FB9AF380D508FCF9253458907"; + public static final String CONFIG_BISCUIT_KEY_DEFAULT = + "3A8621F1847F19D6DAEAB5465CE8D3908B91C66FB9AF380D508FCF9253458907"; public static final String CONFIG_FDB_CLUSTER_FILE = "fdb-cluster-file"; public static final String CONFIG_FDB_CLUSTER_FILE_DEFAULT = "/var/fdb/fdb.cluster"; @@ -29,4 +30,7 @@ public class Constants { public static final String CONFIG_GRAPHQL_LISTEN_PORT = "graphql-listen-port"; public static final String CONFIG_LOAD_DEMO = "load-demo"; + + public static final String CONFIG_ENABLE_MANAGED_KV = "enable-managed-kv"; + public static final Boolean CONFIG_ENABLE_MANAGED_KV_DEFAULT = true; } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/GraphQLVerticle.java b/record-store/src/main/java/fr/pierrezemb/recordstore/GraphQLVerticle.java index e993e54..89519f8 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/GraphQLVerticle.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/GraphQLVerticle.java @@ -27,11 +27,10 @@ import io.vertx.ext.web.RoutingContext; import io.vertx.ext.web.handler.graphql.GraphiQLHandler; import io.vertx.ext.web.handler.graphql.GraphiQLHandlerOptions; +import javax.crypto.spec.SecretKeySpec; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.crypto.spec.SecretKeySpec; - public class GraphQLVerticle extends AbstractVerticle { private static final Logger LOGGER = 
LoggerFactory.getLogger(GraphQLVerticle.class); private RecordLayer recordLayer; @@ -44,13 +43,23 @@ public static void main(String[] args) { public void start(Promise startPromise) throws Exception { Integer port = this.context.config().getInteger(Constants.CONFIG_GRAPHQL_LISTEN_PORT, 8081); - GraphiQLHandlerOptions options = new GraphiQLHandlerOptions() - .setQuery("{ allRecords { name } }") - .setHeaders(ImmutableMap.of("tenant", "my-tenant", "recordSpace", "my-recordSpace")) - .setEnabled(true); + GraphiQLHandlerOptions options = + new GraphiQLHandlerOptions() + .setQuery("{ allRecords { name } }") + .setHeaders(ImmutableMap.of("tenant", "my-tenant", "recordSpace", "my-recordSpace")) + .setEnabled(true); - String clusterFilePath = this.context.config().getString(Constants.CONFIG_FDB_CLUSTER_FILE, Constants.CONFIG_FDB_CLUSTER_FILE_DEFAULT); - byte[] key = this.context.config().getString(Constants.CONFIG_ENCRYPTION_KEY_DEFAULT, Constants.CONFIG_ENCRYPTION_KEY_DEFAULT).getBytes(); + String clusterFilePath = + this.context + .config() + .getString( + Constants.CONFIG_FDB_CLUSTER_FILE, Constants.CONFIG_FDB_CLUSTER_FILE_DEFAULT); + byte[] key = + this.context + .config() + .getString( + Constants.CONFIG_ENCRYPTION_KEY_DEFAULT, Constants.CONFIG_ENCRYPTION_KEY_DEFAULT) + .getBytes(); SecretKeySpec secretKey = new SecretKeySpec(key, "AES"); recordLayer = new RecordLayer(clusterFilePath, vertx.isMetricsEnabled(), secretKey); @@ -65,9 +74,7 @@ public void start(Promise startPromise) throws Exception { LOGGER.info("starting graphQL server on {}", port); - vertx.createHttpServer() - .requestHandler(router) - .listen(port); + vertx.createHttpServer().requestHandler(router).listen(port); startPromise.complete(); } @@ -85,7 +92,11 @@ private void getSchema(RoutingContext routingContext) { try { RecordMetaData metadata = this.recordLayer.getSchema(tenant, recordSpace); String schema = GraphQLSchemaGenerator.generate(metadata); - 
routingContext.response().putHeader("Content-Type", "text/plain").setStatusCode(200).end(schema); + routingContext + .response() + .putHeader("Content-Type", "text/plain") + .setStatusCode(200) + .end(schema); } catch (RuntimeException e) { LOGGER.error(e.getMessage()); routingContext.response().setStatusCode(500).end(e.getMessage()); diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/GrpcVerticle.java b/record-store/src/main/java/fr/pierrezemb/recordstore/GrpcVerticle.java index 54e269f..f4b7e04 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/GrpcVerticle.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/GrpcVerticle.java @@ -15,25 +15,23 @@ */ package fr.pierrezemb.recordstore; +import static fr.pierrezemb.recordstore.Constants.CONFIG_BISCUIT_KEY_DEFAULT; import fr.pierrezemb.recordstore.datasets.DatasetsLoader; import fr.pierrezemb.recordstore.fdb.RecordLayer; import fr.pierrezemb.recordstore.grpc.AdminService; import fr.pierrezemb.recordstore.grpc.AuthInterceptor; +import fr.pierrezemb.recordstore.grpc.ManagedKVService; import fr.pierrezemb.recordstore.grpc.RecordService; import fr.pierrezemb.recordstore.grpc.SchemaService; import io.vertx.core.AbstractVerticle; import io.vertx.core.Promise; import io.vertx.grpc.VertxServer; import io.vertx.grpc.VertxServerBuilder; +import javax.crypto.spec.SecretKeySpec; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.crypto.spec.SecretKeySpec; - -import static fr.pierrezemb.recordstore.Constants.CONFIG_BISCUIT_KEY_DEFAULT; - - public class GrpcVerticle extends AbstractVerticle { private static final Logger LOGGER = LoggerFactory.getLogger(GrpcVerticle.class); @@ -41,15 +39,30 @@ public class GrpcVerticle extends AbstractVerticle { @Override public void start(Promise startPromise) throws Exception { - String clusterFilePath = this.context.config().getString(Constants.CONFIG_FDB_CLUSTER_FILE, Constants.CONFIG_FDB_CLUSTER_FILE_DEFAULT); - 
System.out.println("connecting to fdb@" + clusterFilePath); - - String tokenKey = this.context.config().getString(Constants.CONFIG_BISCUIT_KEY, CONFIG_BISCUIT_KEY_DEFAULT); + String clusterFilePath = + this.context + .config() + .getString( + Constants.CONFIG_FDB_CLUSTER_FILE, Constants.CONFIG_FDB_CLUSTER_FILE_DEFAULT); + LOGGER.info("connecting to fdb@" + clusterFilePath); + + boolean enableManagedKV = + this.context + .config() + .getBoolean( + Constants.CONFIG_ENABLE_MANAGED_KV, Constants.CONFIG_ENABLE_MANAGED_KV_DEFAULT); + + String tokenKey = + this.context.config().getString(Constants.CONFIG_BISCUIT_KEY, CONFIG_BISCUIT_KEY_DEFAULT); if (tokenKey.equals(CONFIG_BISCUIT_KEY_DEFAULT)) { LOGGER.warn("using default key for tokens"); } - byte[] key = this.context.config().getString(Constants.CONFIG_ENCRYPTION_KEY, Constants.CONFIG_ENCRYPTION_KEY_DEFAULT).getBytes(); + byte[] key = + this.context + .config() + .getString(Constants.CONFIG_ENCRYPTION_KEY, Constants.CONFIG_ENCRYPTION_KEY_DEFAULT) + .getBytes(); if (new String(key).equals(Constants.CONFIG_ENCRYPTION_KEY_DEFAULT)) { LOGGER.warn("using default encryption key for records"); } @@ -60,25 +73,33 @@ public void start(Promise startPromise) throws Exception { DatasetsLoader datasetsLoader = new DatasetsLoader(recordLayer); datasetsLoader.loadDataset(this.context.config().getString(Constants.CONFIG_LOAD_DEMO, "")); - VertxServerBuilder serverBuilder = VertxServerBuilder - .forAddress(vertx, - this.context.config().getString(Constants.CONFIG_GRPC_LISTEN_ADDRESS, "localhost"), - this.context.config().getInteger(Constants.CONFIG_GRPC_LISTEN_PORT, 8080)) - .intercept(new AuthInterceptor(tokenKey)) - .addService(new AdminService(recordLayer)) - .addService(new SchemaService(recordLayer)) - .addService(new RecordService(recordLayer)); + VertxServerBuilder serverBuilder = + VertxServerBuilder.forAddress( + vertx, + this.context.config().getString(Constants.CONFIG_GRPC_LISTEN_ADDRESS, "localhost"), + 
this.context.config().getInteger(Constants.CONFIG_GRPC_LISTEN_PORT, 8080)) + .intercept(new AuthInterceptor(tokenKey)) + .addService(new AdminService(recordLayer)) + .addService(new SchemaService(recordLayer)) + .addService(new RecordService(recordLayer)); + + if (enableManagedKV) { + LOGGER.info("enabling ManagedKV"); + serverBuilder.addService(new ManagedKVService(recordLayer)); + } VertxServer server = serverBuilder.build(); - server.start(ar -> { - if (ar.succeeded()) { - System.out.println("gRPC service started on " + this.context.config().getInteger("grpc-listen-port")); - startPromise.complete(); - } else { - System.out.println("Could not start server " + ar.cause().getMessage()); - startPromise.fail(ar.cause()); - } - }); + server.start( + ar -> { + if (ar.succeeded()) { + System.out.println( + "gRPC service started on " + this.context.config().getInteger("grpc-listen-port")); + startPromise.complete(); + } else { + System.out.println("Could not start server " + ar.cause().getMessage()); + startPromise.fail(ar.cause()); + } + }); } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/Launcher.java b/record-store/src/main/java/fr/pierrezemb/recordstore/Launcher.java index ebf943d..0cc574e 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/Launcher.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/Launcher.java @@ -29,14 +29,15 @@ public static void main(String[] args) { @Override public void beforeStartingVertx(VertxOptions options) { options.setMetricsOptions( - new MicrometerMetricsOptions() - .setPrometheusOptions(new VertxPrometheusOptions().setEnabled(true) - .setStartEmbeddedServer(true) - .setEmbeddedServerOptions(new HttpServerOptions() - .setHost("127.0.0.1") - .setPort(9098)) - .setEmbeddedServerEndpoint("/metrics")) - .setEnabled(true)); + new MicrometerMetricsOptions() + .setPrometheusOptions( + new VertxPrometheusOptions() + .setEnabled(true) + .setStartEmbeddedServer(true) + .setEmbeddedServerOptions( 
+ new HttpServerOptions().setHost("127.0.0.1").setPort(9098)) + .setEmbeddedServerEndpoint("/metrics")) + .setEnabled(true)); System.out.println("starting metrics on 9098"); } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitClientCredential.java b/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitClientCredential.java index 050d9fc..2939020 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitClientCredential.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitClientCredential.java @@ -15,15 +15,14 @@ */ package fr.pierrezemb.recordstore.auth; -import io.grpc.Metadata; -import io.grpc.Status; - -import java.util.concurrent.Executor; - import static fr.pierrezemb.recordstore.grpc.GrpcMetadataKeys.AUTHORIZATION_METADATA_KEY; import static fr.pierrezemb.recordstore.grpc.GrpcMetadataKeys.RECORDSPACE_METADATA_KEY; import static fr.pierrezemb.recordstore.grpc.GrpcMetadataKeys.TENANT_METADATA_KEY; +import io.grpc.Metadata; +import io.grpc.Status; +import java.util.concurrent.Executor; + public class BiscuitClientCredential extends io.grpc.CallCredentials { static final String BEARER_TYPE = "Bearer"; @@ -38,22 +37,24 @@ public BiscuitClientCredential(String tenant, String sealedBiscuit, String recor } @Override - public void applyRequestMetadata(RequestInfo requestInfo, Executor appExecutor, MetadataApplier applier) { - appExecutor.execute(new Runnable() { - @Override - public void run() { - try { - Metadata headers = new Metadata(); - headers.put(AUTHORIZATION_METADATA_KEY, String.format("%s %s", BEARER_TYPE, biscuit)); - headers.put(RECORDSPACE_METADATA_KEY, recordSpace); - headers.put(TENANT_METADATA_KEY, tenant); - applier.apply(headers); + public void applyRequestMetadata( + RequestInfo requestInfo, Executor appExecutor, MetadataApplier applier) { + appExecutor.execute( + new Runnable() { + @Override + public void run() { + try { + Metadata headers = new Metadata(); + 
headers.put(AUTHORIZATION_METADATA_KEY, String.format("%s %s", BEARER_TYPE, biscuit)); + headers.put(RECORDSPACE_METADATA_KEY, recordSpace); + headers.put(TENANT_METADATA_KEY, tenant); + applier.apply(headers); - } catch (Throwable e) { - applier.fail(Status.UNAUTHENTICATED.withCause(e)); - } - } - }); + } catch (Throwable e) { + applier.fail(Status.UNAUTHENTICATED.withCause(e)); + } + } + }); } /** diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitManager.java b/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitManager.java index 00e844a..5823f3e 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitManager.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/auth/BiscuitManager.java @@ -15,6 +15,17 @@ */ package fr.pierrezemb.recordstore.auth; +import static com.clevercloud.biscuit.token.builder.Utils.caveat; +import static com.clevercloud.biscuit.token.builder.Utils.fact; +import static com.clevercloud.biscuit.token.builder.Utils.pred; +import static com.clevercloud.biscuit.token.builder.Utils.rule; +import static com.clevercloud.biscuit.token.builder.Utils.s; +import static com.clevercloud.biscuit.token.builder.Utils.string; +import static com.clevercloud.biscuit.token.builder.Utils.var; +import static fr.pierrezemb.recordstore.Constants.CONFIG_BISCUIT_KEY_DEFAULT; +import static io.vavr.API.Left; +import static io.vavr.API.Right; + import com.clevercloud.biscuit.crypto.KeyPair; import com.clevercloud.biscuit.datalog.SymbolTable; import com.clevercloud.biscuit.error.Error; @@ -24,24 +35,12 @@ import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.vavr.control.Either; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.security.SecureRandom; import java.util.Arrays; import java.util.Base64; import java.util.List; - -import static com.clevercloud.biscuit.token.builder.Utils.caveat; -import static com.clevercloud.biscuit.token.builder.Utils.fact; 
-import static com.clevercloud.biscuit.token.builder.Utils.pred; -import static com.clevercloud.biscuit.token.builder.Utils.rule; -import static com.clevercloud.biscuit.token.builder.Utils.s; -import static com.clevercloud.biscuit.token.builder.Utils.string; -import static com.clevercloud.biscuit.token.builder.Utils.var; -import static fr.pierrezemb.recordstore.Constants.CONFIG_BISCUIT_KEY_DEFAULT; -import static io.vavr.API.Left; -import static io.vavr.API.Right; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class BiscuitManager { private static final Logger LOGGER = LoggerFactory.getLogger(BiscuitManager.class); @@ -64,20 +63,23 @@ public String create(String tenant, List authorizedContainers) { // add tenant fact in biscuit authority_builder.add_fact( - fact("right", Arrays.asList(s("authority"), s("tenant"), s(tenant)))); + fact("right", Arrays.asList(s("authority"), s("tenant"), s(tenant)))); // add recordSpaces in biscuit for (String s : authorizedContainers) { - authority_builder.add_fact(fact("right", Arrays.asList(s("authority"), s("recordSpace"), s(s)))); + authority_builder.add_fact( + fact("right", Arrays.asList(s("authority"), s("recordSpace"), s(s)))); } - Either result = Biscuit.make(rng, root, Biscuit.default_symbol_table(), authority_builder.build()); + Either result = + Biscuit.make(rng, root, Biscuit.default_symbol_table(), authority_builder.build()); if (result.isLeft()) { LOGGER.error("cannot create biscuit: {}", result.getLeft()); throw new StatusRuntimeException(Status.INTERNAL.withDescription("cannot create biscuit")); } - Either resultSerialize = result.get().seal(root.private_key.toByteArray()); + Either resultSerialize = + result.get().seal(root.private_key.toByteArray()); if (result.isLeft()) { LOGGER.error("cannot serialize biscuit: {}", result.getLeft()); throw new StatusRuntimeException(Status.INTERNAL.withDescription("cannot serialize biscuit")); @@ -98,21 +100,22 @@ public Either checkTenant(String tenant, String 
serializedBiscuit) verifier.add_fact(fact("tenant", Arrays.asList(s("ambient"), s(tenant)))); verifier.set_time(); - verifier.add_caveat(caveat(rule( - "checked_tenant_right", - Arrays.asList(string(tenant)), - Arrays.asList(pred("right", Arrays.asList(s("authority"), s("tenant"), s(tenant)))) - ))); + verifier.add_caveat( + caveat( + rule( + "checked_tenant_right", + Arrays.asList(string(tenant)), + Arrays.asList( + pred("right", Arrays.asList(s("authority"), s("tenant"), s(tenant))))))); return verifier.verify(); } public Either createVerifier(String serializedBiscuit) { - Either deser = Biscuit.from_sealed( - Base64.getDecoder().decode(serializedBiscuit), - root.private_key.toByteArray() - ); + Either deser = + Biscuit.from_sealed( + Base64.getDecoder().decode(serializedBiscuit), root.private_key.toByteArray()); if (deser.isLeft()) { Error.FormatError e = (Error.FormatError) deser.getLeft(); LOGGER.error("cannot deserialize biscuit: {}", e.toString()); @@ -128,12 +131,12 @@ public Either createVerifier(String serializedBiscuit) { } Verifier verifier = res.get(); - verifier.add_rule(rule( - "right", - Arrays.asList(s("authority"), s("tenant"), var(0)), - Arrays.asList(pred("right", Arrays.asList(s("authority"), s("tenant"), var(0)))))); + verifier.add_rule( + rule( + "right", + Arrays.asList(s("authority"), s("tenant"), var(0)), + Arrays.asList(pred("right", Arrays.asList(s("authority"), s("tenant"), var(0)))))); return Right(verifier); } - } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/Dataset.java b/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/Dataset.java index 03c24e1..f366e46 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/Dataset.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/Dataset.java @@ -20,5 +20,6 @@ import fr.pierrezemb.recordstore.fdb.RecordLayer; public interface Dataset { - void load(RecordLayer recordLayer, String tenant, String recordSpace, int 
nbrRecord) throws Descriptors.DescriptorValidationException, InvalidProtocolBufferException; + void load(RecordLayer recordLayer, String tenant, String recordSpace, int nbrRecord) + throws Descriptors.DescriptorValidationException, InvalidProtocolBufferException; } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/DatasetsLoader.java b/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/DatasetsLoader.java index d0570e6..d1ffaff 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/DatasetsLoader.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/DatasetsLoader.java @@ -18,11 +18,10 @@ import com.google.protobuf.Descriptors; import com.google.protobuf.InvalidProtocolBufferException; import fr.pierrezemb.recordstore.fdb.RecordLayer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class DatasetsLoader { public static final String DEFAULT_DEMO_TENANT = "demo"; @@ -35,7 +34,8 @@ public DatasetsLoader(RecordLayer recordLayer) { nbrRecords = 100; } - public void loadDataset(List datasets) throws InvalidProtocolBufferException, Descriptors.DescriptorValidationException { + public void loadDataset(List datasets) + throws InvalidProtocolBufferException, Descriptors.DescriptorValidationException { List alreadyLoadedDatasets = this.recordLayer.listContainers(DEFAULT_DEMO_TENANT); for (DemoDatasetEnum d : datasets) { Dataset dataset; @@ -55,7 +55,8 @@ public void loadDataset(List datasets) throws InvalidProtocolBu } } - public void loadDataset(String datasetsToLoad) throws InvalidProtocolBufferException, Descriptors.DescriptorValidationException { + public void loadDataset(String datasetsToLoad) + throws InvalidProtocolBufferException, Descriptors.DescriptorValidationException { if (datasetsToLoad == null) { return; diff --git 
a/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/UserDataset.java b/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/UserDataset.java index 6129c25..6719a1f 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/UserDataset.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/datasets/UserDataset.java @@ -24,59 +24,61 @@ import fr.pierrezemb.recordstore.fdb.RecordLayer; import fr.pierrezemb.recordstore.proto.RecordStoreProtocol; import fr.pierrezemb.recordstore.utils.protobuf.ProtobufReflectionUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.HashMap; import java.util.Random; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class UserDataset implements Dataset { private static final Logger LOGGER = LoggerFactory.getLogger(UserDataset.class); @Override - public void load(RecordLayer recordLayer, String tenant, String recordSpace, int nbrRecord) throws Descriptors.DescriptorValidationException, InvalidProtocolBufferException { + public void load(RecordLayer recordLayer, String tenant, String recordSpace, int nbrRecord) + throws Descriptors.DescriptorValidationException, InvalidProtocolBufferException { DescriptorProtos.FileDescriptorSet dependencies = - ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); + ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); recordLayer.upsertSchema( - tenant, - recordSpace, - dependencies, - ImmutableList.of( - RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName("User") - .addAllIndexDefinitions(ImmutableList.of( - RecordStoreProtocol.IndexDefinition.newBuilder() - .setIndexType(RecordStoreProtocol.IndexType.VALUE) - .setFanType(RecordStoreProtocol.FanType.FAN_OUT) - .setField("beers").build(), - RecordStoreProtocol.IndexDefinition.newBuilder() - .setIndexType(RecordStoreProtocol.IndexType.VALUE) - 
.setField("name").build(), - RecordStoreProtocol.IndexDefinition.newBuilder() - .setIndexType(RecordStoreProtocol.IndexType.VALUE) - .setField("email") - .build(), - RecordStoreProtocol.IndexDefinition.newBuilder() - .setIndexType(RecordStoreProtocol.IndexType.MAP_KEYS_AND_VALUES) - .setField("favorite_locations_from_tv") - .build(), - RecordStoreProtocol.IndexDefinition.newBuilder() - .setIndexType(RecordStoreProtocol.IndexType.TEXT_DEFAULT_TOKENIZER) - .setField("rick_and_morty_quotes") - .build(), - RecordStoreProtocol.IndexDefinition.newBuilder() - .setField("address") - .setNestedIndex(RecordStoreProtocol.IndexDefinition.newBuilder() - .setField("city") - .build()) - .build())) - .addAllPrimaryKeyFields(ImmutableList.of("id")) - .build() - ) - ); + tenant, + recordSpace, + dependencies, + ImmutableList.of( + RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() + .setName("User") + .addAllIndexDefinitions( + ImmutableList.of( + RecordStoreProtocol.IndexDefinition.newBuilder() + .setIndexType(RecordStoreProtocol.IndexType.VALUE) + .setFanType(RecordStoreProtocol.FanType.FAN_OUT) + .setField("beers") + .build(), + RecordStoreProtocol.IndexDefinition.newBuilder() + .setIndexType(RecordStoreProtocol.IndexType.VALUE) + .setField("name") + .build(), + RecordStoreProtocol.IndexDefinition.newBuilder() + .setIndexType(RecordStoreProtocol.IndexType.VALUE) + .setField("email") + .build(), + RecordStoreProtocol.IndexDefinition.newBuilder() + .setIndexType(RecordStoreProtocol.IndexType.MAP_KEYS_AND_VALUES) + .setField("favorite_locations_from_tv") + .build(), + RecordStoreProtocol.IndexDefinition.newBuilder() + .setIndexType(RecordStoreProtocol.IndexType.TEXT_DEFAULT_TOKENIZER) + .setField("rick_and_morty_quotes") + .build(), + RecordStoreProtocol.IndexDefinition.newBuilder() + .setField("address") + .setNestedIndex( + RecordStoreProtocol.IndexDefinition.newBuilder() + .setField("city") + .build()) + .build())) + .addAllPrimaryKeyFields(ImmutableList.of("id")) + 
.build())); Faker faker = new Faker(new Random(42)); @@ -100,23 +102,25 @@ public DemoUserProto.User createUser(long id, Faker faker) { } HashMap favoritePlanets = new HashMap<>(); - favoritePlanets.put("hitchhikers_guide_to_the_galaxy", faker.hitchhikersGuideToTheGalaxy().planet()); + favoritePlanets.put( + "hitchhikers_guide_to_the_galaxy", faker.hitchhikersGuideToTheGalaxy().planet()); favoritePlanets.put("rick_and_morty", faker.rickAndMorty().location()); favoritePlanets.put("star_trek", faker.starTrek().location()); - DemoUserProto.Address address = DemoUserProto.Address.newBuilder() - .setFullAddress(faker.address().fullAddress()) - .setCity(faker.address().cityName()) - .build(); + DemoUserProto.Address address = + DemoUserProto.Address.newBuilder() + .setFullAddress(faker.address().fullAddress()) + .setCity(faker.address().cityName()) + .build(); return DemoUserProto.User.newBuilder() - .setId(id) - .setName(faker.funnyName().name()) - .setEmail(faker.internet().emailAddress()) - .addAllBeers(beers) - .setRickAndMortyQuotes(faker.rickAndMorty().quote()) - .putAllFavoriteLocationsFromTv(favoritePlanets) - .setAddress(address) - .build(); + .setId(id) + .setName(faker.funnyName().name()) + .setEmail(faker.internet().emailAddress()) + .addAllBeers(beers) + .setRickAndMortyQuotes(faker.rickAndMorty().quote()) + .putAllFavoriteLocationsFromTv(favoritePlanets) + .setAddress(address) + .build(); } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/ManagedSchemaKeySpace.java b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/ManagedSchemaKeySpace.java new file mode 100644 index 0000000..e6d202b --- /dev/null +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/ManagedSchemaKeySpace.java @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Pierre Zemb + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package fr.pierrezemb.recordstore.fdb; + +import com.apple.foundationdb.record.provider.foundationdb.keyspace.DirectoryLayerDirectory; +import com.apple.foundationdb.record.provider.foundationdb.keyspace.KeySpace; +import com.apple.foundationdb.record.provider.foundationdb.keyspace.KeySpaceDirectory; +import com.apple.foundationdb.record.provider.foundationdb.keyspace.KeySpacePath; + +public class ManagedSchemaKeySpace { + public static final String APPLICATION_NAME = "record-store"; + public static final KeySpace RS_KEY_SPACE = + new KeySpace( + new DirectoryLayerDirectory("application") + .addSubdirectory( + new KeySpaceDirectory("tenant", KeySpaceDirectory.KeyType.STRING) + .addSubdirectory( + new KeySpaceDirectory( + "managedSchemaType", KeySpaceDirectory.KeyType.STRING) + .addSubdirectory( + new KeySpaceDirectory( + "managedSchema", KeySpaceDirectory.KeyType.STRING) + .addSubdirectory( + new KeySpaceDirectory( + "data", KeySpaceDirectory.KeyType.STRING, "d")))))); + + public static KeySpacePath openDataKeySpacePath( + String tenant, String managedSchemaType, String managedSchema) { + return openKeySpacePath(tenant, managedSchemaType, managedSchema); + } + + private static KeySpacePath openKeySpacePath( + String tenant, String managedSchemaType, String managedSchema) { + return RS_KEY_SPACE + .path("application", APPLICATION_NAME) + .add("tenant", tenant) + .add("managedSchemaType", managedSchemaType) + .add("managedSchema", managedSchema) + .add("data"); + } +} diff --git 
a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordLayer.java b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordLayer.java index 9b0fb2a..211d5f8 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordLayer.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordLayer.java @@ -15,6 +15,11 @@ */ package fr.pierrezemb.recordstore.fdb; +import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.COUNT_INDEX; +import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.COUNT_UPDATES_INDEX; +import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.INDEX_COUNT_AGGREGATE_FUNCTION; +import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.INDEX_COUNT_UPDATES_AGGREGATE_FUNCTION; + import com.apple.foundationdb.record.CursorStreamingMode; import com.apple.foundationdb.record.EvaluationContext; import com.apple.foundationdb.record.ExecuteProperties; @@ -42,7 +47,6 @@ import com.apple.foundationdb.record.provider.foundationdb.keyspace.KeySpacePath; import com.apple.foundationdb.record.provider.foundationdb.keyspace.ResolvedKeySpacePath; import com.apple.foundationdb.record.query.RecordQuery; -import com.apple.foundationdb.record.query.plan.plans.RecordQueryPlan; import com.apple.foundationdb.tuple.Tuple; import com.google.protobuf.DescriptorProtos; import com.google.protobuf.Descriptors; @@ -55,10 +59,6 @@ import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; import io.vertx.core.Promise; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.crypto.SecretKey; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -70,11 +70,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; - -import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.COUNT_INDEX; -import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.COUNT_UPDATES_INDEX; -import static 
fr.pierrezemb.recordstore.fdb.UniversalIndexes.INDEX_COUNT_AGGREGATE_FUNCTION; -import static fr.pierrezemb.recordstore.fdb.UniversalIndexes.INDEX_COUNT_UPDATES_AGGREGATE_FUNCTION; +import javax.crypto.SecretKey; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class RecordLayer { private static final Logger LOGGER = LoggerFactory.getLogger(RecordLayer.class); @@ -82,7 +80,8 @@ public class RecordLayer { private final FDBMetricsStoreTimer timer; private final SecretKey defaultKey; - public RecordLayer(String clusterFilePath, boolean enableMetrics, SecretKey key) throws InterruptedException, ExecutionException, TimeoutException { + public RecordLayer(String clusterFilePath, boolean enableMetrics, SecretKey key) + throws InterruptedException, ExecutionException, TimeoutException { db = FDBDatabaseFactory.instance().getDatabase(clusterFilePath); db.performNoOpAsync().get(2, TimeUnit.SECONDS); System.out.println("connected to FDB!"); @@ -90,68 +89,71 @@ public RecordLayer(String clusterFilePath, boolean enableMetrics, SecretKey key) defaultKey = key; } - /** - * List all recordSpaces for a tenant - */ + /** List all recordSpaces for a tenant */ public List listContainers(String tenantID) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - KeySpacePath tenantKeySpace = RecordStoreKeySpace.getApplicationKeySpacePath(tenantID); - List recordSpaces = tenantKeySpace - .listSubdirectory(context, "recordSpace", ScanProperties.FORWARD_SCAN); + KeySpacePath tenantKeySpace = RecordStoreKeySpace.openKeySpacePath(tenantID); + List recordSpaces = + tenantKeySpace.listSubdirectory(context, "recordSpace", ScanProperties.FORWARD_SCAN); return recordSpaces.stream() - .map(e -> e.getResolvedValue().toString()) - .collect(Collectors.toList()); + .map(e -> e.getResolvedValue().toString()) + .collect(Collectors.toList()); } - /** - * delete a recordSpace for a tenant - */ + /** delete a recordSpace for a tenant */ public void 
deleteContainer(String tenantID, String recordSpace) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBRecordStore.deleteStore(context, RecordStoreKeySpace.getDataKeySpacePath(tenantID, recordSpace)); - FDBRecordStore.deleteStore(context, RecordStoreKeySpace.getMetaDataKeySpacePath(tenantID, recordSpace)); + FDBRecordStore.deleteStore( + context, RecordStoreKeySpace.openDataKeySpacePath(tenantID, recordSpace)); + FDBRecordStore.deleteStore( + context, RecordStoreKeySpace.openMetaDataKeySpacePath(tenantID, recordSpace)); context.commit(); } - /** - * get schema for a tenant and a recordSpace - */ + /** get schema for a tenant and a recordSpace */ public RecordMetaData getSchema(String tenantID, String recordSpace) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metaDataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); + FDBMetaDataStore metaDataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); - List indexes = metaDataStore.getRecordMetaData().getAllIndexes().stream() - .filter(e -> !e.getName().startsWith("global")) - .map(e -> - RecordStoreProtocol.IndexDescription.newBuilder() - .build() - ).collect(Collectors.toList()); + List indexes = + metaDataStore.getRecordMetaData().getAllIndexes().stream() + .filter(e -> !e.getName().startsWith("global")) + .map(e -> RecordStoreProtocol.IndexDescription.newBuilder().build()) + .collect(Collectors.toList()); return metaDataStore.getRecordMetaData(); } - public List getIndexes(String tenantID, String recordSpace) { + public List getIndexes( + String tenantID, String recordSpace) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metaDataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); + FDBMetaDataStore metaDataStore = + 
RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); return metaDataStore.getRecordMetaData().getAllIndexes().stream() - .filter(e -> !e.getName().startsWith("global")) - .map(e -> - RecordStoreProtocol.IndexDescription.newBuilder() - .build() - ).collect(Collectors.toList()); + .filter(e -> !e.getName().startsWith("global")) + .map(e -> RecordStoreProtocol.IndexDescription.newBuilder().build()) + .collect(Collectors.toList()); } - public void upsertSchema(String tenantID, String recordSpace, DescriptorProtos.FileDescriptorSet schema, List indexes) throws Descriptors.DescriptorValidationException { + public void upsertSchema( + String tenantID, + String recordSpace, + DescriptorProtos.FileDescriptorSet schema, + List indexes) + throws Descriptors.DescriptorValidationException { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metaDataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); + FDBMetaDataStore metaDataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); RecordMetaData oldMetaData = null; int version = 0; try { oldMetaData = metaDataStore.getRecordMetaData(); - LOGGER.debug("metadata for {}:{} is in version {}", tenantID, recordSpace, oldMetaData.getVersion()); + LOGGER.debug( + "metadata for {}:{} is in version {}", tenantID, recordSpace, oldMetaData.getVersion()); version = oldMetaData.getVersion() + 1; } catch (FDBMetaDataStore.MissingMetaDataException e) { LOGGER.info("missing metadata, creating one"); @@ -161,10 +163,11 @@ public void upsertSchema(String tenantID, String recordSpace, DescriptorProtos.F // handling upgrade if (null != oldMetaData) { - MetaDataEvolutionValidator metaDataEvolutionValidator = MetaDataEvolutionValidator.newBuilder() - .setAllowIndexRebuilds(true) - .setAllowMissingFormerIndexNames(false) - .build(); + MetaDataEvolutionValidator metaDataEvolutionValidator = + 
MetaDataEvolutionValidator.newBuilder() + .setAllowIndexRebuilds(true) + .setAllowMissingFormerIndexNames(false) + .build(); metaDataEvolutionValidator.validate(oldMetaData, newRecordMetaData); } @@ -173,16 +176,21 @@ public void upsertSchema(String tenantID, String recordSpace, DescriptorProtos.F metaDataStore.saveRecordMetaData(newRecordMetaData.getRecordMetaData().toProto()); context.commit(); - } - private RecordMetaData createRecordMetaData(DescriptorProtos.FileDescriptorSet schema, List indexes, int version, RecordMetaData oldMetadata) throws Descriptors.DescriptorValidationException { + private RecordMetaData createRecordMetaData( + DescriptorProtos.FileDescriptorSet schema, + List indexes, + int version, + RecordMetaData oldMetadata) + throws Descriptors.DescriptorValidationException { // retrieving protobuf descriptor RecordMetaDataBuilder metadataBuilder = RecordMetaData.newBuilder(); for (DescriptorProtos.FileDescriptorProto fdp : schema.getFileList()) { - Descriptors.FileDescriptor fd = Descriptors.FileDescriptor.buildFrom(fdp, new Descriptors.FileDescriptor[]{}); + Descriptors.FileDescriptor fd = + Descriptors.FileDescriptor.buildFrom(fdp, new Descriptors.FileDescriptor[] {}); // updating schema metadataBuilder.setRecords(fd); } @@ -192,16 +200,16 @@ private RecordMetaData createRecordMetaData(DescriptorProtos.FileDescriptorSet s metadataBuilder.setStoreRecordVersions(true); metadataBuilder.setSplitLongRecords(true); - HashSet oldIndexes = oldMetadata != null ? - new HashSet<>(oldMetadata.getAllIndexes()) : - new HashSet<>(); + HashSet oldIndexes = + oldMetadata != null ? 
new HashSet<>(oldMetadata.getAllIndexes()) : new HashSet<>(); HashSet oldIndexesNames = new HashSet<>(); // add old indexes for (Index index : oldIndexes) { LOGGER.trace("adding old index {}", index.getName()); oldIndexesNames.add(index.getName()); - if (index.getName().equals(UniversalIndexes.COUNT_INDEX_NAME) || index.getName().equals(UniversalIndexes.COUNT_UPDATES_INDEX_NAME)) { + if (index.getName().equals(UniversalIndexes.COUNT_INDEX_NAME) + || index.getName().equals(UniversalIndexes.COUNT_UPDATES_INDEX_NAME)) { metadataBuilder.addUniversalIndex(index); } else { // we need to retrieve the record @@ -219,7 +227,8 @@ private RecordMetaData createRecordMetaData(DescriptorProtos.FileDescriptorSet s for (RecordStoreProtocol.RecordTypeIndexDefinition idxRequest : indexes) { LOGGER.trace("adding indexes for {}", idxRequest.getName()); // add new indexes - for (RecordStoreProtocol.IndexDefinition indexDefinition : idxRequest.getIndexDefinitionsList()) { + for (RecordStoreProtocol.IndexDefinition indexDefinition : + idxRequest.getIndexDefinitionsList()) { String indexName = generateIndexName(idxRequest.getName(), indexDefinition); Index index = createIndex(indexDefinition, indexName); @@ -229,11 +238,11 @@ private RecordMetaData createRecordMetaData(DescriptorProtos.FileDescriptorSet s } } // set primary key - metadataBuilder.getRecordType(idxRequest.getName()) - .setPrimaryKey(buildPrimaryKeyExpression(idxRequest.getPrimaryKeyFieldsList())); + metadataBuilder + .getRecordType(idxRequest.getName()) + .setPrimaryKey(buildPrimaryKeyExpression(idxRequest.getPrimaryKeyFieldsList())); } - if (oldMetadata == null) { metadataBuilder.addUniversalIndex(COUNT_INDEX); metadataBuilder.addUniversalIndex(COUNT_UPDATES_INDEX); @@ -242,11 +251,20 @@ private RecordMetaData createRecordMetaData(DescriptorProtos.FileDescriptorSet s return metadataBuilder.build(); } - private String generateIndexName(String name, RecordStoreProtocol.IndexDefinition indexDefinition) { + private String 
generateIndexName( + String name, RecordStoreProtocol.IndexDefinition indexDefinition) { if (!indexDefinition.hasNestedIndex()) { - return name + "_idx_" + indexDefinition.getField() + "_" + indexDefinition.getIndexType().toString(); + return name + + "_idx_" + + indexDefinition.getField() + + "_" + + indexDefinition.getIndexType().toString(); } - return name + "_idx_" + indexDefinition.getField() + "_nested_" + generateIndexName(name, indexDefinition.getNestedIndex()); + return name + + "_idx_" + + indexDefinition.getField() + + "_nested_" + + generateIndexName(name, indexDefinition.getNestedIndex()); } private Index createIndex(RecordStoreProtocol.IndexDefinition indexDefinition, String indexName) { @@ -254,48 +272,41 @@ private Index createIndex(RecordStoreProtocol.IndexDefinition indexDefinition, S if (indexDefinition.hasNestedIndex()) { return new Index( - indexName, - Key.Expressions.field(indexDefinition.getField(), getFanType(indexDefinition.getFanType())) - .nest(createKeyExpressionFromIndexDefinition(indexDefinition.getNestedIndex()))); + indexName, + Key.Expressions.field( + indexDefinition.getField(), getFanType(indexDefinition.getFanType())) + .nest(createKeyExpressionFromIndexDefinition(indexDefinition.getNestedIndex()))); } switch (indexDefinition.getIndexType()) { case VALUE: - index = new Index( - indexName, - Key.Expressions.field(indexDefinition.getField(), getFanType(indexDefinition.getFanType())), - IndexTypes.VALUE); + index = + new Index( + indexName, + Key.Expressions.field( + indexDefinition.getField(), getFanType(indexDefinition.getFanType())), + IndexTypes.VALUE); break; - // https://github.com/FoundationDB/fdb-record-layer/blob/e70d3f9b5cec1cf37b6f540d4e673059f2a628ab/fdb-record-layer-core/src/main/java/com/apple/foundationdb/record/provider/foundationdb/indexes/TextIndexMaintainer.java#L81-L93 + // 
https://github.com/FoundationDB/fdb-record-layer/blob/e70d3f9b5cec1cf37b6f540d4e673059f2a628ab/fdb-record-layer-core/src/main/java/com/apple/foundationdb/record/provider/foundationdb/indexes/TextIndexMaintainer.java#L81-L93 case TEXT_DEFAULT_TOKENIZER: - index = new Index( - indexName, - Key.Expressions.field(indexDefinition.getField(), getFanType(indexDefinition.getFanType())), - IndexTypes.TEXT); + index = + new Index( + indexName, + Key.Expressions.field( + indexDefinition.getField(), getFanType(indexDefinition.getFanType())), + IndexTypes.TEXT); break; case VERSION: - index = new Index( - indexName, - VersionKeyExpression.VERSION, - IndexTypes.VERSION); + index = new Index(indexName, VersionKeyExpression.VERSION, IndexTypes.VERSION); break; case MAP_KEYS: - index = new Index( - indexName, - Key.Expressions.mapKeys(indexDefinition.getField()) - ); + index = new Index(indexName, Key.Expressions.mapKeys(indexDefinition.getField())); break; case MAP_VALUES: - index = new Index( - indexName, - Key.Expressions.mapValues(indexDefinition.getField()) - ); + index = new Index(indexName, Key.Expressions.mapValues(indexDefinition.getField())); break; case MAP_KEYS_AND_VALUES: - index = new Index( - indexName, - Key.Expressions.mapKeyValues(indexDefinition.getField()) - ); + index = new Index(indexName, Key.Expressions.mapKeyValues(indexDefinition.getField())); break; case UNRECOGNIZED: return null; @@ -303,7 +314,8 @@ private Index createIndex(RecordStoreProtocol.IndexDefinition indexDefinition, S return index; } - private KeyExpression createKeyExpressionFromIndexDefinition(RecordStoreProtocol.IndexDefinition nestedIndex) { + private KeyExpression createKeyExpressionFromIndexDefinition( + RecordStoreProtocol.IndexDefinition nestedIndex) { return Key.Expressions.field(nestedIndex.getField(), getFanType(nestedIndex.getFanType())); } @@ -322,10 +334,8 @@ private KeyExpression.FanType getFanType(RecordStoreProtocol.FanType fanType) { } private KeyExpression 
buildPrimaryKeyExpression(List primaryKeyFields) { - List keyExpressions = primaryKeyFields - .stream() - .map(Key.Expressions::field) - .collect(Collectors.toList()); + List keyExpressions = + primaryKeyFields.stream().map(Key.Expressions::field).collect(Collectors.toList()); // adding the recordType in the key expressions. Following advices from // https://forums.foundationdb.org/t/split-long-record-causes-conflict-with-other-record/2160/2?u=pierrez @@ -340,139 +350,278 @@ public Tuple getCountAndCountUpdates(String tenantID, String recordSpace) { public Tuple getCountAndCountUpdates(String tenantID, String recordSpace, SecretKey key) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); FDBRecordStore r = createFDBRecordStore(context, metadataStore, key, tenantID, recordSpace); - CompletableFuture countFuture = r.evaluateAggregateFunction( - EvaluationContext.EMPTY, - Collections.emptyList(), - INDEX_COUNT_AGGREGATE_FUNCTION, - TupleRange.ALL, - IsolationLevel.SERIALIZABLE); - - CompletableFuture updateFuture = r.evaluateAggregateFunction( - EvaluationContext.EMPTY, - Collections.emptyList(), - INDEX_COUNT_UPDATES_AGGREGATE_FUNCTION, - TupleRange.ALL, - IsolationLevel.SERIALIZABLE); - - return countFuture.thenCombine(updateFuture, (count, update) - -> Tuple.from(count.getLong(0), update.getLong(0))).join(); + CompletableFuture countFuture = + r.evaluateAggregateFunction( + EvaluationContext.EMPTY, + Collections.emptyList(), + INDEX_COUNT_AGGREGATE_FUNCTION, + TupleRange.ALL, + IsolationLevel.SERIALIZABLE); + + CompletableFuture updateFuture = + r.evaluateAggregateFunction( + EvaluationContext.EMPTY, + Collections.emptyList(), + INDEX_COUNT_UPDATES_AGGREGATE_FUNCTION, + TupleRange.ALL, + 
IsolationLevel.SERIALIZABLE); + + return countFuture + .thenCombine( + updateFuture, (count, update) -> Tuple.from(count.getLong(0), update.getLong(0))) + .join(); } - public void putRecord(String tenantID, String recordSpace, String table, byte[] record, SecretKey customKey) throws InvalidProtocolBufferException { + public void putRecord( + String tenantID, String recordSpace, String table, byte[] record, SecretKey customKey) + throws InvalidProtocolBufferException { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metaDataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); + FDBMetaDataStore metaDataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); - Descriptors.Descriptor descriptor = metaDataStore.getRecordMetaData().getRecordsDescriptor().findMessageTypeByName(table); + Descriptors.Descriptor descriptor = + metaDataStore.getRecordMetaData().getRecordsDescriptor().findMessageTypeByName(table); if (descriptor == null) { throw new RuntimeException("cannot find descriptor for table " + table); } - FDBRecordStore r = createFDBRecordStore(context, metaDataStore, customKey, tenantID, recordSpace); + FDBRecordStore r = + createFDBRecordStore(context, metaDataStore, customKey, tenantID, recordSpace); DynamicMessage msg = DynamicMessage.parseFrom(descriptor, record); r.saveRecord(msg); context.commit(); } - - public void putRecord(String tenantID, String recordSpace, String table, byte[] record) throws InvalidProtocolBufferException { + public void putRecord(String tenantID, String recordSpace, String table, byte[] record) + throws InvalidProtocolBufferException { putRecord(tenantID, recordSpace, table, record, defaultKey); } + public void putRecord( + String tenantID, + String managedSchemaType, + String managedSchema, + RecordMetaData recordMetaData, + Message message) { + putRecord(tenantID, managedSchemaType, managedSchema, 
recordMetaData, message, defaultKey); + } + + public void putRecord( + String tenantID, + String managedSchemaType, + String managedSchema, + RecordMetaData recordMetaData, + Message message, + SecretKey secretKey) { + FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); + FDBRecordStore r = + createFDBRecordStore( + context, recordMetaData, secretKey, tenantID, managedSchemaType, managedSchema); + + LOGGER.info( + "saving {} for {}/{}/{}", + r.saveRecord(message).toString(), + tenantID, + managedSchemaType, + managedSchema); + context.commit(); + } + + public List scanRecords( + String tenantID, + String managedSchemaType, + String managedSchema, + RecordMetaData recordMetaData, + RecordQuery query) { + return scanRecords( + tenantID, managedSchemaType, managedSchema, recordMetaData, query, defaultKey); + } + + public List scanRecords( + String tenantID, + String managedSchemaType, + String managedSchema, + RecordMetaData recordMetaData, + RecordQuery query, + SecretKey secretKey) { + FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); + FDBRecordStore r = + createFDBRecordStore( + context, recordMetaData, secretKey, tenantID, managedSchemaType, managedSchema); + + return this.executeQuery(r, query) + .map(FDBRecord::getRecord) + .filter(Objects::nonNull) + .asList() + .join(); + } + + public boolean deleteRecord( + String tenantID, + String managedSchemaType, + String managedSchema, + RecordMetaData recordMetaData, + Tuple primaryKey) { + return deleteRecord( + tenantID, managedSchemaType, managedSchema, recordMetaData, primaryKey, defaultKey); + } + + public boolean deleteRecord( + String tenantID, + String managedSchemaType, + String managedSchema, + RecordMetaData recordMetaData, + Tuple primaryKey, + SecretKey secretKey) { + FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); + FDBRecordStore r = + createFDBRecordStore( + context, 
recordMetaData, secretKey, tenantID, managedSchemaType, managedSchema); + + return r.deleteRecord(primaryKey); + } + public List queryRecords(String tenantID, String recordSpace, RecordQuery query) { return queryRecords(tenantID, recordSpace, query, defaultKey); } - public List queryRecords(String tenantID, String recordSpace, RecordQuery query, SecretKey key) { + public List queryRecords( + String tenantID, String recordSpace, RecordQuery query, SecretKey key) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, recordSpace); FDBRecordStore r = createFDBRecordStore(context, metadataStore, key, tenantID, recordSpace); - return this.executeQuery(r, query, tenantID, recordSpace) - .map(e -> { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("found record '{}' from {}/{}", e.getPrimaryKey(), tenantID, recordSpace); - } - return e; - }) - .map(FDBRecord::getRecord) - .asList() - .join(); + return this.executeQuery(r, query) + .map( + e -> { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + "found record '{}' from {}/{}", e.getPrimaryKey(), tenantID, recordSpace); + } + return e; + }) + .map(FDBRecord::getRecord) + .asList() + .join(); } - public void queryRecords(String tenantID, String recordSpace, RecordQuery query, IsolationLevel isolationLevel, StreamObserver responseObserver) { + public void queryRecords( + String tenantID, + String recordSpace, + RecordQuery query, + IsolationLevel isolationLevel, + StreamObserver responseObserver) { queryRecords(tenantID, recordSpace, query, isolationLevel, defaultKey, responseObserver); } - public void queryRecords(String tenantID, String container, RecordQuery query, IsolationLevel isolationLevel, SecretKey key, StreamObserver responseObserver) { + public void 
queryRecords( + String tenantID, + String container, + RecordQuery query, + IsolationLevel isolationLevel, + SecretKey key, + StreamObserver responseObserver) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); FDBRecordStore r = createFDBRecordStore(context, metadataStore, key, tenantID, container); - this.executeQuery(r, query, isolationLevel, tenantID, container) - .map(e -> { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("found record '{}' from {}/{}", e.getPrimaryKey(), tenantID, container); - } - return e; - }) - .map(FDBRecord::getRecord) - .map(Message::toByteString) - .forEach(e -> responseObserver.onNext(RecordStoreProtocol.QueryResponse.newBuilder().setRecord(e).build())) - .join(); + this.executeQuery(r, query, isolationLevel) + .map( + e -> { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + "found record '{}' from {}/{}", e.getPrimaryKey(), tenantID, container); + } + return e; + }) + .map(FDBRecord::getRecord) + .map(Message::toByteString) + .forEach( + e -> + responseObserver.onNext( + RecordStoreProtocol.QueryResponse.newBuilder().setRecord(e).build())) + .join(); } - public void queryRecords(String tenantID, String container, RecordQuery query, Promise>> future) { + public void queryRecords( + String tenantID, + String container, + RecordQuery query, + Promise>> future) { queryRecords(tenantID, container, query, defaultKey, future); } - public void queryRecords(String tenantID, String container, RecordQuery query, SecretKey encryptionKey, Promise>> future) { + public void queryRecords( + String tenantID, + String container, + RecordQuery query, + SecretKey encryptionKey, + Promise>> future) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), 
timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); - FDBRecordStore r = createFDBRecordStore(context, metadataStore, encryptionKey, tenantID, container); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); + FDBRecordStore r = + createFDBRecordStore(context, metadataStore, encryptionKey, tenantID, container); - Descriptors.Descriptor descriptor = r.getRecordMetaData().getRecordsDescriptor().findMessageTypeByName("User"); + Descriptors.Descriptor descriptor = + r.getRecordMetaData().getRecordsDescriptor().findMessageTypeByName("User"); List> result = null; try { - result = this.executeQuery(r, query, tenantID, container) - .map(e -> { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("found record '{}' from {}/{}", e.getPrimaryKey(), tenantID, container); - } - return e; - }) - .map(queriedRecord -> { - try { - return DynamicMessage.parseFrom(descriptor, queriedRecord.getStoredRecord().getRecord().toByteArray()); - } catch (InvalidProtocolBufferException e) { - return null; - } - }) - .filter(Objects::nonNull) - - // TODO: can we replace `graphql.schema.PropertyDataFetcher` to avoid casting things as an HashMap? 
- .map(dynamicMessage -> { - Map results = new HashMap<>(); - dynamicMessage.getAllFields().forEach((key, value) -> results.put(key.getName(), value)); - return results; - }) - .asList().get(); + result = + this.executeQuery(r, query) + .map( + e -> { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + "found record '{}' from {}/{}", e.getPrimaryKey(), tenantID, container); + } + return e; + }) + .map( + queriedRecord -> { + try { + return DynamicMessage.parseFrom( + descriptor, queriedRecord.getStoredRecord().getRecord().toByteArray()); + } catch (InvalidProtocolBufferException e) { + return null; + } + }) + .filter(Objects::nonNull) + + // TODO: can we replace `graphql.schema.PropertyDataFetcher` to avoid casting things + // as an HashMap? + .map( + dynamicMessage -> { + Map results = new HashMap<>(); + dynamicMessage + .getAllFields() + .forEach((key, value) -> results.put(key.getName(), value)); + return results; + }) + .asList() + .get(); future.complete(result); } catch (InterruptedException | ExecutionException e) { future.fail(e); } } - private RecordCursor> executeQuery(FDBRecordStore r, RecordQuery query, String tenantID, String container) { - return this.executeQuery(r, query, IsolationLevel.SERIALIZABLE, tenantID, container); + private RecordCursor> executeQuery( + FDBRecordStore r, RecordQuery query) { + return this.executeQuery(r, query, IsolationLevel.SERIALIZABLE); } - private RecordCursor> executeQuery(FDBRecordStore r, RecordQuery query, IsolationLevel isolationLevel, String tenantID, String container) { + private RecordCursor> executeQuery( + FDBRecordStore r, RecordQuery query, IsolationLevel isolationLevel) { // TODO: handle errors instead of throwing null if (query == null) { LOGGER.error("query is null, skipping"); @@ -481,12 +630,11 @@ private RecordCursor> executeQuery(FDBRecordStore r, R LOGGER.info(query.toString()); - RecordQueryPlan plan = r.planQuery(query); - LOGGER.info("running query for {}/{}: '{}'", tenantID, container, plan); - - 
ExecuteProperties.Builder executeProperties = ExecuteProperties.newBuilder() - .setIsolationLevel(isolationLevel) - .setDefaultCursorStreamingMode(CursorStreamingMode.ITERATOR); // either WANT_ALL OR streaming mode + ExecuteProperties.Builder executeProperties = + ExecuteProperties.newBuilder() + .setIsolationLevel(isolationLevel) + .setDefaultCursorStreamingMode( + CursorStreamingMode.ITERATOR); // either WANT_ALL OR streaming mode executeProperties.setScannedBytesLimit(1_000_000); // 1MB @@ -499,7 +647,8 @@ public long deleteAllRecords(String tenantID, String container) { public long deleteAllRecords(String tenantID, String container, SecretKey key) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); FDBRecordStore r = createFDBRecordStore(context, metadataStore, key, tenantID, container); r.deleteAllRecords(); // TODO: return count of records with the call stats @@ -512,18 +661,22 @@ public long deleteRecords(String tenantID, String container, RecordQuery query) public long deleteRecords(String tenantID, String container, RecordQuery query, SecretKey key) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); FDBRecordStore r = createFDBRecordStore(context, metadataStore, key, tenantID, container); - Integer count = this.executeQuery(r, query, tenantID, container) - .map(e -> { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("deleting {} from {}/{}", e.getPrimaryKey(), tenantID, container); - } - return e; - }) - .map(e -> 
r.deleteRecord(e.getPrimaryKey())) - .getCount().join(); + Integer count = + this.executeQuery(r, query) + .map( + e -> { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("deleting {} from {}/{}", e.getPrimaryKey(), tenantID, container); + } + return e; + }) + .map(e -> r.deleteRecord(e.getPrimaryKey())) + .getCount() + .join(); context.commit(); return count; } @@ -534,26 +687,55 @@ public String getQueryPlan(String tenantID, String container, RecordQuery query) public String getQueryPlan(String tenantID, String container, RecordQuery query, SecretKey key) { FDBRecordContext context = db.openContext(Collections.singletonMap("tenant", tenantID), timer); - FDBMetaDataStore metadataStore = RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); + FDBMetaDataStore metadataStore = + RecordStoreMetaDataStore.createMetadataStore(context, tenantID, container); FDBRecordStore r = createFDBRecordStore(context, metadataStore, key, tenantID, container); return r.planQuery(query).toString(); } - private FDBRecordStore createFDBRecordStore(FDBRecordContext context, - FDBMetaDataStore metaDataStore, - SecretKey key, String tenantID, String container) { + private FDBRecordStore createFDBRecordStore( + FDBRecordContext context, + FDBMetaDataStore metaDataStore, + SecretKey key, + String tenantID, + String container) { + + TransformedRecordSerializer serializer = + TransformedRecordSerializerJCE.newDefaultBuilder() + .setEncryptWhenSerializing(true) + .setCompressWhenSerializing(true) + .setEncryptionKey(key) + .build(); + + return FDBRecordStore.newBuilder() + .setMetaDataProvider(metaDataStore) + .setContext(context) + .setSerializer(serializer) + .setKeySpacePath(RecordStoreKeySpace.openDataKeySpacePath(tenantID, container)) + .createOrOpen(); + } - TransformedRecordSerializer serializer = TransformedRecordSerializerJCE.newDefaultBuilder() - .setEncryptWhenSerializing(true) - .setCompressWhenSerializing(true) - .setEncryptionKey(key) - .build(); + private 
FDBRecordStore createFDBRecordStore( + FDBRecordContext context, + RecordMetaData recordMetaData, + SecretKey key, + String tenantID, + String managedSchemaType, + String managedSchema) { + + TransformedRecordSerializer serializer = + TransformedRecordSerializerJCE.newDefaultBuilder() + .setEncryptWhenSerializing(true) + .setCompressWhenSerializing(true) + .setEncryptionKey(key) + .build(); return FDBRecordStore.newBuilder() - .setMetaDataProvider(metaDataStore) - .setContext(context) - .setSerializer(serializer) - .setKeySpacePath(RecordStoreKeySpace.getDataKeySpacePath(tenantID, container)) - .createOrOpen(); + .setMetaDataProvider(recordMetaData) + .setContext(context) + .setSerializer(serializer) + .setKeySpacePath( + ManagedSchemaKeySpace.openDataKeySpacePath(tenantID, managedSchemaType, managedSchema)) + .createOrOpen(); } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreKeySpace.java b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreKeySpace.java index a3482d2..dfa9b82 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreKeySpace.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreKeySpace.java @@ -23,33 +23,36 @@ public class RecordStoreKeySpace { public static final String APPLICATION_NAME = "record-store"; public static final KeySpace RS_KEY_SPACE = - new KeySpace( - new DirectoryLayerDirectory("application") - .addSubdirectory(new KeySpaceDirectory("tenant", KeySpaceDirectory.KeyType.STRING) - .addSubdirectory(new KeySpaceDirectory("recordSpace", KeySpaceDirectory.KeyType.STRING) - .addSubdirectory(new KeySpaceDirectory("metadata", KeySpaceDirectory.KeyType.STRING, "m")) - .addSubdirectory(new KeySpaceDirectory("data", KeySpaceDirectory.KeyType.STRING, "d")) - ))); + new KeySpace( + new DirectoryLayerDirectory("application") + .addSubdirectory( + new KeySpaceDirectory("tenant", KeySpaceDirectory.KeyType.STRING) + .addSubdirectory( + new 
KeySpaceDirectory("recordSpace", KeySpaceDirectory.KeyType.STRING) + .addSubdirectory( + new KeySpaceDirectory( + "metadata", KeySpaceDirectory.KeyType.STRING, "m")) + .addSubdirectory( + new KeySpaceDirectory( + "data", KeySpaceDirectory.KeyType.STRING, "d"))))); - public static KeySpacePath getMetaDataKeySpacePath(String tenant, String recordSpace) { - return getKeySpacePath(tenant, recordSpace, "metadata"); + public static KeySpacePath openMetaDataKeySpacePath(String tenant, String recordSpace) { + return openKeySpacePath(tenant, recordSpace, "metadata"); } - public static KeySpacePath getApplicationKeySpacePath(String tenant) { - return RS_KEY_SPACE - .path("application", APPLICATION_NAME) - .add("tenant", tenant); + public static KeySpacePath openKeySpacePath(String tenant) { + return RS_KEY_SPACE.path("application", APPLICATION_NAME).add("tenant", tenant); } - public static KeySpacePath getDataKeySpacePath(String tenant, String recordSpace) { - return getKeySpacePath(tenant, recordSpace, "data"); + public static KeySpacePath openDataKeySpacePath(String tenant, String recordSpace) { + return openKeySpacePath(tenant, recordSpace, "data"); } - private static KeySpacePath getKeySpacePath(String tenant, String env, String subDirectory) { + private static KeySpacePath openKeySpacePath(String tenant, String env, String subDirectory) { return RS_KEY_SPACE - .path("application", APPLICATION_NAME) - .add("tenant", tenant) - .add("recordSpace", env) - .add(subDirectory); + .path("application", APPLICATION_NAME) + .add("tenant", tenant) + .add("recordSpace", env) + .add(subDirectory); } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreMetaDataStore.java b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreMetaDataStore.java index ddf622b..ec448e0 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreMetaDataStore.java +++ 
b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/RecordStoreMetaDataStore.java @@ -19,8 +19,10 @@ import com.apple.foundationdb.record.provider.foundationdb.FDBRecordContext; public class RecordStoreMetaDataStore { - public static FDBMetaDataStore createMetadataStore(FDBRecordContext context, String tenant, String env) { - FDBMetaDataStore metaDataStore = new FDBMetaDataStore(context, RecordStoreKeySpace.getMetaDataKeySpacePath(tenant, env)); + public static FDBMetaDataStore createMetadataStore( + FDBRecordContext context, String tenant, String env) { + FDBMetaDataStore metaDataStore = + new FDBMetaDataStore(context, RecordStoreKeySpace.openMetaDataKeySpacePath(tenant, env)); metaDataStore.setMaintainHistory(true); return metaDataStore; } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/UniversalIndexes.java b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/UniversalIndexes.java index 73e330d..4232b08 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/UniversalIndexes.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/UniversalIndexes.java @@ -24,15 +24,25 @@ public class UniversalIndexes { public static final String COUNT_INDEX_NAME = "globalRecordCount"; - public static final Index COUNT_INDEX = new Index(COUNT_INDEX_NAME, - new GroupingKeyExpression(RecordTypeKeyExpression.RECORD_TYPE_KEY, 0), IndexTypes.COUNT); + public static final Index COUNT_INDEX = + new Index( + COUNT_INDEX_NAME, + new GroupingKeyExpression(RecordTypeKeyExpression.RECORD_TYPE_KEY, 0), + IndexTypes.COUNT); public static final String COUNT_UPDATES_INDEX_NAME = "globalRecordUpdateCount"; - public static final Index COUNT_UPDATES_INDEX = new Index(COUNT_UPDATES_INDEX_NAME, - new GroupingKeyExpression(RecordTypeKeyExpression.RECORD_TYPE_KEY, 0), IndexTypes.COUNT_UPDATES); + public static final Index COUNT_UPDATES_INDEX = + new Index( + COUNT_UPDATES_INDEX_NAME, + new 
GroupingKeyExpression(RecordTypeKeyExpression.RECORD_TYPE_KEY, 0), + IndexTypes.COUNT_UPDATES); - public static IndexAggregateFunction INDEX_COUNT_AGGREGATE_FUNCTION = new IndexAggregateFunction( - FunctionNames.COUNT, COUNT_INDEX.getRootExpression(), COUNT_INDEX.getName()); - public static IndexAggregateFunction INDEX_COUNT_UPDATES_AGGREGATE_FUNCTION = new IndexAggregateFunction( - FunctionNames.COUNT_UPDATES, COUNT_UPDATES_INDEX.getRootExpression(), COUNT_UPDATES_INDEX.getName()); + public static IndexAggregateFunction INDEX_COUNT_AGGREGATE_FUNCTION = + new IndexAggregateFunction( + FunctionNames.COUNT, COUNT_INDEX.getRootExpression(), COUNT_INDEX.getName()); + public static IndexAggregateFunction INDEX_COUNT_UPDATES_AGGREGATE_FUNCTION = + new IndexAggregateFunction( + FunctionNames.COUNT_UPDATES, + COUNT_UPDATES_INDEX.getRootExpression(), + COUNT_UPDATES_INDEX.getName()); } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/metrics/FDBMetricsStoreTimer.java b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/metrics/FDBMetricsStoreTimer.java index 628f1d3..6462410 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/metrics/FDBMetricsStoreTimer.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/fdb/metrics/FDBMetricsStoreTimer.java @@ -18,9 +18,8 @@ import com.apple.foundationdb.record.provider.foundationdb.FDBStoreTimer; import io.micrometer.core.instrument.Metrics; import io.vertx.micrometer.backends.BackendRegistries; - -import javax.annotation.Nonnull; import java.util.Set; +import javax.annotation.Nonnull; public class FDBMetricsStoreTimer extends FDBStoreTimer { @@ -36,41 +35,42 @@ public FDBMetricsStoreTimer(boolean enableExport) { } /** - * Record the amount of time each element in a set of events took to run. - * This applies the same time difference to each event in the set. + * Record the amount of time each element in a set of events took to run. 
This applies the same + * time difference to each event in the set. * - * @param events the set of events being recorded + * @param events the set of events being recorded * @param timeDifferenceNanos the time that the instrumented events took to run */ @Override public void record(Set events, long timeDifferenceNanos) { if (export) { for (Event count : events) { - Metrics.counter(buildClassname(count.name() + "_ns"), "log_key", count.logKey()).increment(timeDifferenceNanos); + Metrics.counter(buildClassname(count.name() + "_ns"), "log_key", count.logKey()) + .increment(timeDifferenceNanos); } } super.record(events, timeDifferenceNanos); } /** - * Record the amount of time an event took to run. - * Subclasses can extend this to also update metrics aggregation or - * monitoring services. + * Record the amount of time an event took to run. Subclasses can extend this to also update + * metrics aggregation or monitoring services. * - * @param event the event being recorded + * @param event the event being recorded * @param timeDifferenceNanos the time that instrumented event took to run */ @Override public void record(Event event, long timeDifferenceNanos) { if (export) { - Metrics.counter(buildClassname(event.name() + "_ns"), "log_key", event.logKey()).increment(timeDifferenceNanos); + Metrics.counter(buildClassname(event.name() + "_ns"), "log_key", event.logKey()) + .increment(timeDifferenceNanos); } super.record(event, timeDifferenceNanos); } /** - * Record that each event in a set occurred once. This increments - * the counters associated with each event. + * Record that each event in a set occurred once. This increments the counters associated with + * each event. * * @param events the set of events being recorded */ @@ -85,8 +85,8 @@ public void increment(@Nonnull Set events) { } /** - * Record that an event occurred once. This increments the counter associated - * with the given event. + * Record that an event occurred once. 
This increments the counter associated with the given + * event. * * @param event the event being recorded */ @@ -103,8 +103,8 @@ private String buildClassname(String name) { } /** - * Record that each event occurred one or more times. This increments - * the counters associated with each event by amount. + * Record that each event occurred one or more times. This increments the counters associated with + * each event by amount. * * @param events the set of events being recorded * @param amount the number of times each event occurred @@ -120,10 +120,10 @@ public void increment(@Nonnull Set events, int amount) { } /** - * Record that an event occurred one or more times. This increments the - * counter associated with the given event by amount. + * Record that an event occurred one or more times. This increments the counter associated with + * the given event by amount. * - * @param event the event being recorded + * @param event the event being recorded * @param amount the number of times the event occurred */ @Override diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGenerator.java b/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGenerator.java index 8f94197..48d8cb2 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGenerator.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGenerator.java @@ -25,7 +25,6 @@ import graphql.schema.GraphQLFieldDefinition; import graphql.schema.GraphQLScalarType; import graphql.schema.idl.SchemaPrinter; - import java.util.stream.Collectors; public class GraphQLSchemaGenerator { @@ -34,17 +33,19 @@ public static String generate(RecordMetaData recordMetaData) { SchemaPrinter schemaPrinter = new SchemaPrinter(); // Converting struct from protobuf to graphQL - String schema = recordMetaData.getRecordTypes().values().stream() - .map(e -> ProtoToGql.convert(e.getDescriptor(), 
SchemaOptions.defaultOptions())) - .map(schemaPrinter::print) - .collect(Collectors.joining("\n")); + String schema = + recordMetaData.getRecordTypes().values().stream() + .map(e -> ProtoToGql.convert(e.getDescriptor(), SchemaOptions.defaultOptions())) + .map(schemaPrinter::print) + .collect(Collectors.joining("\n")); schema += "\ntype Query {\n"; // for each type, we can retrieve all the associated queries - schema += recordMetaData.getRecordTypes().values().stream() - .map(GraphQLSchemaGenerator::generateQueries) - .collect(Collectors.joining("\n")); + schema += + recordMetaData.getRecordTypes().values().stream() + .map(GraphQLSchemaGenerator::generateQueries) + .collect(Collectors.joining("\n")); schema += "}\n"; @@ -54,9 +55,16 @@ public static String generate(RecordMetaData recordMetaData) { private static String generateQueries(RecordType e) { StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append(" # retrieve a list of ").append(e.getName()).append(". Use limit to retrieve only a number of records\n"); - stringBuilder.append(" all").append(e.getName()).append("s(limit: Int): [").append(e.getName()).append("!]!\n\n"); - + stringBuilder + .append(" # retrieve a list of ") + .append(e.getName()) + .append(". 
Use limit to retrieve only a number of records\n"); + stringBuilder + .append(" all") + .append(e.getName()) + .append("s(limit: Int): [") + .append(e.getName()) + .append("!]!\n\n"); for (Index index : e.getAllIndexes()) { generateQueryWithIndex(index, e, stringBuilder); @@ -65,7 +73,8 @@ private static String generateQueries(RecordType e) { return stringBuilder.toString(); } - private static void generateQueryWithIndex(Index i, RecordType recordType, StringBuilder stringBuilder) { + private static void generateQueryWithIndex( + Index i, RecordType recordType, StringBuilder stringBuilder) { if (i.getName().endsWith(RecordStoreProtocol.IndexType.VALUE.toString())) { String[] splits = i.getName().split("_"); @@ -82,7 +91,8 @@ private static void generateQueryWithIndex(Index i, RecordType recordType, Strin return; } - GraphQLFieldDefinition graphqlField = ProtoToGql.convertField(protoField, SchemaOptions.defaultOptions()); + GraphQLFieldDefinition graphqlField = + ProtoToGql.convertField(protoField, SchemaOptions.defaultOptions()); if (graphqlField == null) { return; } @@ -96,8 +106,23 @@ private static void generateQueryWithIndex(Index i, RecordType recordType, Strin String fieldNameWithUpper = field.substring(0, 1).toUpperCase() + field.substring(1); stringBuilder - .append(" # get a ").append(type).append(" using the ").append(field).append(" field\n") - .append(" get").append(type).append("By").append(fieldNameWithUpper).append("(").append(field).append(": ").append(graphQLType).append(")").append(": ").append(type).append("!\n\n"); + .append(" # get a ") + .append(type) + .append(" using the ") + .append(field) + .append(" field\n") + .append(" get") + .append(type) + .append("By") + .append(fieldNameWithUpper) + .append("(") + .append(field) + .append(": ") + .append(graphQLType) + .append(")") + .append(": ") + .append(type) + .append("!\n\n"); } } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/RecordStoreGraphQLHandler.java 
b/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/RecordStoreGraphQLHandler.java index 6fd436b..11b6fe8 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/RecordStoreGraphQLHandler.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/graphql/RecordStoreGraphQLHandler.java @@ -15,6 +15,12 @@ */ package fr.pierrezemb.recordstore.graphql; +import static fr.pierrezemb.recordstore.datasets.DatasetsLoader.DEFAULT_DEMO_TENANT; +import static graphql.schema.idl.RuntimeWiring.newRuntimeWiring; +import static io.vertx.core.http.HttpMethod.GET; +import static io.vertx.core.http.HttpMethod.POST; +import static java.util.stream.Collectors.toList; + import com.apple.foundationdb.record.RecordMetaData; import com.apple.foundationdb.record.query.RecordQuery; import fr.pierrezemb.recordstore.fdb.RecordLayer; @@ -42,36 +48,32 @@ import io.vertx.ext.web.handler.graphql.impl.GraphQLBatch; import io.vertx.ext.web.handler.graphql.impl.GraphQLInput; import io.vertx.ext.web.handler.graphql.impl.GraphQLQuery; -import org.dataloader.DataLoaderRegistry; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; import java.util.function.Function; - -import static fr.pierrezemb.recordstore.datasets.DatasetsLoader.DEFAULT_DEMO_TENANT; -import static graphql.schema.idl.RuntimeWiring.newRuntimeWiring; -import static io.vertx.core.http.HttpMethod.GET; -import static io.vertx.core.http.HttpMethod.POST; -import static java.util.stream.Collectors.toList; +import org.dataloader.DataLoaderRegistry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Taken from https://github.com/vert-x3/vertx-web/blob/3.9/vertx-web-graphql/src/main/java/io/vertx/ext/web/handler/graphql/impl/GraphQLHandlerImpl.java + * Taken from + * 
https://github.com/vert-x3/vertx-web/blob/3.9/vertx-web-graphql/src/main/java/io/vertx/ext/web/handler/graphql/impl/GraphQLHandlerImpl.java */ public class RecordStoreGraphQLHandler implements GraphQLHandler { private static final Logger LOGGER = LoggerFactory.getLogger(RecordStoreGraphQLHandler.class); private static final Function DEFAULT_QUERY_CONTEXT_FACTORY = rc -> rc; - private static final Function DEFAULT_DATA_LOADER_REGISTRY_FACTORY = rc -> null; + private static final Function + DEFAULT_DATA_LOADER_REGISTRY_FACTORY = rc -> null; private static final Function DEFAULT_LOCALE_FACTORY = rc -> null; private final RecordLayer recordLayer; private Function queryContextFactory = DEFAULT_QUERY_CONTEXT_FACTORY; - private Function dataLoaderRegistryFactory = DEFAULT_DATA_LOADER_REGISTRY_FACTORY; + private Function dataLoaderRegistryFactory = + DEFAULT_DATA_LOADER_REGISTRY_FACTORY; private Function localeFactory = DEFAULT_LOCALE_FACTORY; public RecordStoreGraphQLHandler(RecordLayer recordLayer) { @@ -85,7 +87,8 @@ public synchronized GraphQLHandler queryContext(Function } @Override - public synchronized GraphQLHandler dataLoaderRegistry(Function factory) { + public synchronized GraphQLHandler dataLoaderRegistry( + Function factory) { dataLoaderRegistryFactory = factory != null ? 
factory : DEFAULT_DATA_LOADER_REGISTRY_FACTORY; return this; } @@ -149,14 +152,17 @@ private void handlePost(RoutingContext rc, Buffer body) { handlePostJson(rc, body, rc.queryParams().get("operationName"), variables); break; case "application/graphql": - executeOne(rc, new GraphQLQuery(body.toString(), rc.queryParams().get("operationName"), variables)); + executeOne( + rc, + new GraphQLQuery(body.toString(), rc.queryParams().get("operationName"), variables)); break; default: rc.fail(415); } } - private void handlePostJson(RoutingContext rc, Buffer body, String operationName, Map variables) { + private void handlePostJson( + RoutingContext rc, Buffer body, String operationName, Map variables) { GraphQLInput graphQLInput; try { graphQLInput = JsonCodec.INSTANCE.fromBuffer(body, GraphQLInput.class); @@ -173,11 +179,9 @@ private void handlePostJson(RoutingContext rc, Buffer body, String operationName } } - private void handlePostBatch(RoutingContext rc, GraphQLBatch batch, String operationName, Map variables) { - /**if (!options.isRequestBatchingEnabled()) { - rc.fail(400); - return; - }*/ + private void handlePostBatch( + RoutingContext rc, GraphQLBatch batch, String operationName, Map variables) { + /** if (!options.isRequestBatchingEnabled()) { rc.fail(400); return; } */ for (GraphQLQuery query : batch) { if (query.getQuery() == null) { failQueryMissing(rc); @@ -194,20 +198,22 @@ private void handlePostBatch(RoutingContext rc, GraphQLBatch batch, String opera } private void executeBatch(RoutingContext rc, GraphQLBatch batch) { - List> results = batch.stream() - .map(q -> execute(rc, q)) - .collect(toList()); + List> results = + batch.stream().map(q -> execute(rc, q)).collect(toList()); CompletableFuture.allOf(results.toArray(new CompletableFuture[0])) - .thenApply(v -> { - JsonArray jsonArray = results.stream() - .map(CompletableFuture::join) - .collect(JsonArray::new, JsonArray::add, JsonArray::addAll); - return jsonArray.toBuffer(); - }) - .whenComplete((buffer, 
throwable) -> sendResponse(rc, buffer, throwable)); + .thenApply( + v -> { + JsonArray jsonArray = + results.stream() + .map(CompletableFuture::join) + .collect(JsonArray::new, JsonArray::add, JsonArray::addAll); + return jsonArray.toBuffer(); + }) + .whenComplete((buffer, throwable) -> sendResponse(rc, buffer, throwable)); } - private void handlePostQuery(RoutingContext rc, GraphQLQuery query, String operationName, Map variables) { + private void handlePostQuery( + RoutingContext rc, GraphQLQuery query, String operationName, Map variables) { if (query.getQuery() == null) { failQueryMissing(rc); return; @@ -223,8 +229,8 @@ private void handlePostQuery(RoutingContext rc, GraphQLQuery query, String opera private void executeOne(RoutingContext rc, GraphQLQuery query) { execute(rc, query) - .thenApply(JsonObject::toBuffer) - .whenComplete((buffer, throwable) -> sendResponse(rc, buffer, throwable)); + .thenApply(JsonObject::toBuffer) + .whenComplete((buffer, throwable) -> sendResponse(rc, buffer, throwable)); } private CompletableFuture execute(RoutingContext rc, GraphQLQuery query) { @@ -266,12 +272,15 @@ private CompletableFuture execute(RoutingContext rc, GraphQLQuery qu GraphQL graphQL = createGraphQL(DEFAULT_DEMO_TENANT, "USER"); - return graphQL.executeAsync(builder.build()).thenApplyAsync(executionResult -> { - return new JsonObject(executionResult.toSpecification()); - }, contextExecutor(rc)); + return graphQL + .executeAsync(builder.build()) + .thenApplyAsync( + executionResult -> { + return new JsonObject(executionResult.toSpecification()); + }, + contextExecutor(rc)); } - private String getContentType(RoutingContext rc) { String contentType = rc.parsedHeaders().contentType().value(); return contentType.isEmpty() ? 
"application/json" : contentType.toLowerCase(); @@ -318,21 +327,26 @@ private GraphQL createGraphQL(String tenant, String recordSpace) { SchemaParser schemaParser = new SchemaParser(); TypeDefinitionRegistry typeDefinitionRegistry = schemaParser.parse(schema); - RuntimeWiring runtimeWiring = newRuntimeWiring() - .type("Query", builder -> { - VertxDataFetcher>> getAllRecords = new VertxDataFetcher<>(this::getAllRecords); - return builder.dataFetcher("allUsers", getAllRecords); - }) - .build(); + RuntimeWiring runtimeWiring = + newRuntimeWiring() + .type( + "Query", + builder -> { + VertxDataFetcher>> getAllRecords = + new VertxDataFetcher<>(this::getAllRecords); + return builder.dataFetcher("allUsers", getAllRecords); + }) + .build(); SchemaGenerator schemaGenerator = new SchemaGenerator(); - GraphQLSchema graphQLSchema = schemaGenerator.makeExecutableSchema(typeDefinitionRegistry, runtimeWiring); + GraphQLSchema graphQLSchema = + schemaGenerator.makeExecutableSchema(typeDefinitionRegistry, runtimeWiring); - return GraphQL.newGraphQL(graphQLSchema) - .build(); + return GraphQL.newGraphQL(graphQLSchema).build(); } - private void getAllRecords(DataFetchingEnvironment env, Promise>> future) { + private void getAllRecords( + DataFetchingEnvironment env, Promise>> future) { RecordQuery query = GraphQLQueryGenerator.generate(env); this.recordLayer.queryRecords("demo", "USER", query, future); } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AdminService.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AdminService.java index 2cbab8a..91d6f93 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AdminService.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AdminService.java @@ -21,18 +21,16 @@ import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; +import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; - 
public class AdminService extends AdminServiceGrpc.AdminServiceImplBase { private static final Logger LOGGER = LoggerFactory.getLogger(AdminService.class); private final RecordLayer recordLayer; public AdminService(RecordLayer recordLayer) { this.recordLayer = recordLayer; - } /** @@ -40,7 +38,9 @@ public AdminService(RecordLayer recordLayer) { * @param responseObserver */ @Override - public void list(RecordStoreProtocol.ListContainerRequest request, StreamObserver responseObserver) { + public void list( + RecordStoreProtocol.ListContainerRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); List results; @@ -51,9 +51,8 @@ public void list(RecordStoreProtocol.ListContainerRequest request, StreamObserve throw new StatusRuntimeException(Status.INTERNAL.withCause(e)); } - responseObserver.onNext(RecordStoreProtocol.ListContainerResponse.newBuilder() - .addAllContainers(results) - .build()); + responseObserver.onNext( + RecordStoreProtocol.ListContainerResponse.newBuilder().addAllContainers(results).build()); responseObserver.onCompleted(); } @@ -62,7 +61,9 @@ public void list(RecordStoreProtocol.ListContainerRequest request, StreamObserve * @param responseObserver */ @Override - public void delete(RecordStoreProtocol.DeleteContainerRequest request, StreamObserver responseObserver) { + public void delete( + RecordStoreProtocol.DeleteContainerRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); try { @@ -71,16 +72,18 @@ public void delete(RecordStoreProtocol.DeleteContainerRequest request, StreamObs } } catch (RuntimeException runtimeException) { LOGGER.error("could not delete recordSpace", runtimeException); - throw new StatusRuntimeException(Status.INTERNAL.withDescription(runtimeException.getMessage())); + throw new StatusRuntimeException( + Status.INTERNAL.withDescription(runtimeException.getMessage())); } - 
responseObserver.onNext(RecordStoreProtocol.EmptyResponse.newBuilder() - .build()); + responseObserver.onNext(RecordStoreProtocol.EmptyResponse.newBuilder().build()); responseObserver.onCompleted(); } @Override - public void ping(RecordStoreProtocol.EmptyRequest request, StreamObserver responseObserver) { + public void ping( + RecordStoreProtocol.EmptyRequest request, + StreamObserver responseObserver) { GrpcContextKeys.getTenantIDOrFail(); responseObserver.onNext(RecordStoreProtocol.EmptyResponse.newBuilder().build()); responseObserver.onCompleted(); diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AuthInterceptor.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AuthInterceptor.java index ae3e88e..99d479f 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AuthInterceptor.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/AuthInterceptor.java @@ -27,25 +27,24 @@ import io.grpc.ServerInterceptor; import io.grpc.Status; import io.vavr.control.Either; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.List; import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class AuthInterceptor implements ServerInterceptor { private static final Logger LOGGER = LoggerFactory.getLogger(AuthInterceptor.class); - private static final Map, Context.Key> METADATA_KEY_TO_CONTEXT_KEY = ImmutableMap.of( - GrpcMetadataKeys.RECORDSPACE_METADATA_KEY, GrpcContextKeys.CONTAINER_NAME, - GrpcMetadataKeys.TENANT_METADATA_KEY, GrpcContextKeys.TENANT_ID_KEY - ); + private static final Map, Context.Key> METADATA_KEY_TO_CONTEXT_KEY = + ImmutableMap.of( + GrpcMetadataKeys.RECORDSPACE_METADATA_KEY, GrpcContextKeys.CONTAINER_NAME, + GrpcMetadataKeys.TENANT_METADATA_KEY, GrpcContextKeys.TENANT_ID_KEY); private final BiscuitManager biscuitManager; - private final List> requiredKeys = Arrays.asList( - GrpcMetadataKeys.AUTHORIZATION_METADATA_KEY, - 
GrpcMetadataKeys.RECORDSPACE_METADATA_KEY, - GrpcMetadataKeys.TENANT_METADATA_KEY - ); + private final List> requiredKeys = + Arrays.asList( + GrpcMetadataKeys.AUTHORIZATION_METADATA_KEY, + GrpcMetadataKeys.RECORDSPACE_METADATA_KEY, + GrpcMetadataKeys.TENANT_METADATA_KEY); public AuthInterceptor(String key) { biscuitManager = new BiscuitManager(key); @@ -53,29 +52,30 @@ public AuthInterceptor(String key) { /** * Intercept {@link ServerCall} dispatch by the {@code next} {@link ServerCallHandler}. General - * semantics of {@link ServerCallHandler#startCall} apply and the returned - * {@link ServerCall.Listener} must not be {@code null}. + * semantics of {@link ServerCallHandler#startCall} apply and the returned {@link + * ServerCall.Listener} must not be {@code null}. * *

If the implementation throws an exception, {@code call} will be closed with an error. * Implementations must not throw an exception if they started processing that may use {@code * call} on another thread. * - * @param call object to receive response messages - * @param headers which can contain extra call metadata from {@link ClientCall#start}, - * e.g. authentication credentials. - * @param next next processor in the interceptor chain + * @param call object to receive response messages + * @param headers which can contain extra call metadata from {@link ClientCall#start}, e.g. + * authentication credentials. + * @param next next processor in the interceptor chain * @return listener for processing incoming messages for {@code call}, never {@code null}. */ @Override - public ServerCall.Listener interceptCall(ServerCall call, Metadata headers, ServerCallHandler next) { + public ServerCall.Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { Context context = Context.current(); LOGGER.info("{}", headers); if (!headers.containsKey(GrpcMetadataKeys.AUTHORIZATION_METADATA_KEY)) { - call.close(Status.PERMISSION_DENIED.withDescription("no authorization token"), new Metadata()); - return new ServerCall.Listener() { - }; + call.close( + Status.PERMISSION_DENIED.withDescription("no authorization token"), new Metadata()); + return new ServerCall.Listener() {}; } String tenant = getFromHeaders(headers, GrpcMetadataKeys.TENANT_METADATA_KEY); @@ -83,27 +83,26 @@ public ServerCall.Listener interceptCall(ServerCall() { - }; + return new ServerCall.Listener() {}; } String token = headers.get(GrpcMetadataKeys.AUTHORIZATION_METADATA_KEY); if (token == null) { call.close(Status.PERMISSION_DENIED.withDescription("no token provided"), new Metadata()); - return new ServerCall.Listener() { - }; + return new ServerCall.Listener() {}; } if (!token.startsWith("Bearer ")) { - call.close(Status.PERMISSION_DENIED.withDescription("expected format 'Bearer 
my-token'"), new Metadata()); - return new ServerCall.Listener() { - }; + call.close( + Status.PERMISSION_DENIED.withDescription("expected format 'Bearer my-token'"), + new Metadata()); + return new ServerCall.Listener() {}; } - Either result = this.biscuitManager.checkTenant(tenant, token.substring("Bearer ".length())); + Either result = + this.biscuitManager.checkTenant(tenant, token.substring("Bearer ".length())); if (result.isLeft()) { call.close(Status.UNAUTHENTICATED.withDescription("bad tenant and/or token"), headers); - return new ServerCall.Listener() { - }; + return new ServerCall.Listener() {}; } // Admin calls does not need recordSpaces diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcContextKeys.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcContextKeys.java index aaf49bf..c0e1318 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcContextKeys.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcContextKeys.java @@ -20,10 +20,9 @@ import io.grpc.StatusRuntimeException; public class GrpcContextKeys { - /** - * Key for accessing requested tenant id - */ + /** Key for accessing requested tenant id */ public static final Context.Key TENANT_ID_KEY = Context.key("tenant"); + public static final Context.Key CONTAINER_NAME = Context.key("recordSpace"); public static String getTenantIDOrFail() throws StatusRuntimeException { @@ -37,7 +36,8 @@ public static String getTenantIDOrFail() throws StatusRuntimeException { public static String getContainerOrFail() throws StatusRuntimeException { String recordSpace = GrpcContextKeys.CONTAINER_NAME.get(); if (recordSpace == null) { - throw new StatusRuntimeException(Status.FAILED_PRECONDITION.withDescription("missing recordSpace")); + throw new StatusRuntimeException( + Status.FAILED_PRECONDITION.withDescription("missing recordSpace")); } return recordSpace; } diff --git 
a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcMetadataKeys.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcMetadataKeys.java index bf537b9..31d03bc 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcMetadataKeys.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/GrpcMetadataKeys.java @@ -15,12 +15,15 @@ */ package fr.pierrezemb.recordstore.grpc; -import io.grpc.Metadata; - import static io.grpc.Metadata.ASCII_STRING_MARSHALLER; +import io.grpc.Metadata; + public class GrpcMetadataKeys { - public static final Metadata.Key AUTHORIZATION_METADATA_KEY = Metadata.Key.of("Authorization", ASCII_STRING_MARSHALLER); - public static final Metadata.Key TENANT_METADATA_KEY = Metadata.Key.of("Tenant", ASCII_STRING_MARSHALLER); - public static final Metadata.Key RECORDSPACE_METADATA_KEY = Metadata.Key.of("RecordSpace", ASCII_STRING_MARSHALLER); + public static final Metadata.Key AUTHORIZATION_METADATA_KEY = + Metadata.Key.of("Authorization", ASCII_STRING_MARSHALLER); + public static final Metadata.Key TENANT_METADATA_KEY = + Metadata.Key.of("Tenant", ASCII_STRING_MARSHALLER); + public static final Metadata.Key RECORDSPACE_METADATA_KEY = + Metadata.Key.of("RecordSpace", ASCII_STRING_MARSHALLER); } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/ManagedKVService.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/ManagedKVService.java new file mode 100644 index 0000000..12b79c9 --- /dev/null +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/ManagedKVService.java @@ -0,0 +1,120 @@ +/** + * Copyright 2020 Pierre Zemb + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package fr.pierrezemb.recordstore.grpc; + +import com.apple.foundationdb.record.RecordMetaData; +import com.apple.foundationdb.record.RecordMetaDataBuilder; +import com.apple.foundationdb.record.metadata.Key; +import com.apple.foundationdb.record.query.RecordQuery; +import com.apple.foundationdb.record.query.expressions.Query; +import com.apple.foundationdb.tuple.Tuple; +import fr.pierrezemb.recordstore.fdb.RecordLayer; +import fr.pierrezemb.recordstore.proto.managed.kv.ManagedKVGrpc; +import fr.pierrezemb.recordstore.proto.managed.kv.ManagedKVProto; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ManagedKVService extends ManagedKVGrpc.ManagedKVImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(ManagedKVService.class); + private static final String MANAGED_KV_NAME = "managedKV"; + private final RecordLayer recordLayer; + private final RecordMetaData recordMetaData; + + public ManagedKVService(RecordLayer recordLayer) { + this.recordLayer = recordLayer; + RecordMetaDataBuilder recordMetaDataBuilder = + RecordMetaData.newBuilder().setRecords(ManagedKVProto.getDescriptor()); + recordMetaDataBuilder.getRecordType("KeyValue").setPrimaryKey(Key.Expressions.field("key")); + this.recordMetaData = recordMetaDataBuilder.build(); + } + + @Override + public void put( + ManagedKVProto.KeyValue request, + StreamObserver responseObserver) { + String tenantID = GrpcContextKeys.getTenantIDOrFail(); + String 
recordSpace = GrpcContextKeys.getContainerOrFail(); + + try { + this.recordLayer.putRecord( + tenantID, MANAGED_KV_NAME, recordSpace, this.recordMetaData, request); + } catch (RuntimeException e) { + LOGGER.error(e.getMessage()); + throw new StatusRuntimeException(Status.INTERNAL.withDescription(e.getMessage())); + } + + responseObserver.onNext(ManagedKVProto.EmptyResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void delete( + ManagedKVProto.DeleteRequest request, + StreamObserver responseObserver) { + String tenantID = GrpcContextKeys.getTenantIDOrFail(); + String recordSpace = GrpcContextKeys.getContainerOrFail(); + + Tuple primaryKey = Tuple.from(request.getKeyToDelete().toByteArray()); + + try { + boolean deleted = + this.recordLayer.deleteRecord( + tenantID, MANAGED_KV_NAME, recordSpace, this.recordMetaData, primaryKey); + LOGGER.debug("delete({})={}", primaryKey.toString(), deleted); + responseObserver.onNext(ManagedKVProto.EmptyResponse.newBuilder().build()); + } catch (RuntimeException e) { + LOGGER.error(e.getMessage()); + throw new StatusRuntimeException(Status.INTERNAL.withDescription(e.getMessage())); + } + responseObserver.onCompleted(); + } + + @Override + public void scan( + ManagedKVProto.ScanRequest request, + StreamObserver responseObserver) { + String tenantID = GrpcContextKeys.getTenantIDOrFail(); + String recordSpace = GrpcContextKeys.getContainerOrFail(); + + RecordQuery query = + RecordQuery.newBuilder() + .setRecordType("KeyValue") + .setFilter( + request.getEndKey().isEmpty() + ? 
Query.field("key").equalsValue(request.getStartKey().toByteArray()) + : Query.and( + Query.field("key").greaterThanOrEquals(request.getStartKey().toByteArray()), + Query.field("key").lessThanOrEquals(request.getEndKey().toByteArray()))) + .build(); + + try { + this.recordLayer + .scanRecords(tenantID, MANAGED_KV_NAME, recordSpace, this.recordMetaData, query) + .stream() + .map( + queriedRecord -> + ManagedKVProto.KeyValue.newBuilder().mergeFrom(queriedRecord).build()) + .forEach(responseObserver::onNext); + } catch (RuntimeException e) { + LOGGER.error(e.getMessage()); + throw new StatusRuntimeException(Status.INTERNAL.withDescription(e.getMessage())); + } + responseObserver.onCompleted(); + } +} diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/RecordService.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/RecordService.java index 7920ff0..5b3a343 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/RecordService.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/RecordService.java @@ -41,14 +41,18 @@ public RecordService(RecordLayer recordLayer) { * @param responseObserver */ @Override - public void put(RecordStoreProtocol.PutRecordRequest request, StreamObserver responseObserver) { + public void put( + RecordStoreProtocol.PutRecordRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); try { - this.recordLayer.putRecord(tenantID, recordSpace, request.getRecordTypeName(), request.getMessage().toByteArray()); + this.recordLayer.putRecord( + tenantID, recordSpace, request.getRecordTypeName(), request.getMessage().toByteArray()); } catch (InvalidProtocolBufferException e) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("could not parse Protobuf: " + e.getMessage())); + throw new StatusRuntimeException( + Status.INVALID_ARGUMENT.withDescription("could not 
parse Protobuf: " + e.getMessage())); } catch (RuntimeException e) { log.error(e.getMessage()); throw new StatusRuntimeException(Status.INTERNAL.withDescription(e.getMessage())); @@ -58,19 +62,23 @@ public void put(RecordStoreProtocol.PutRecordRequest request, StreamObserver responseObserver) { + public void query( + RecordStoreProtocol.QueryRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); RecordQuery query = GrpcQueryGenerator.generate(request); - IsolationLevel isolationLevel = request - .getQueryIsolationLevel().equals(RecordStoreProtocol.QueryIsolationLevel.SERIALIZABLE) ? - IsolationLevel.SERIALIZABLE : IsolationLevel.SNAPSHOT; + IsolationLevel isolationLevel = + request + .getQueryIsolationLevel() + .equals(RecordStoreProtocol.QueryIsolationLevel.SERIALIZABLE) + ? IsolationLevel.SERIALIZABLE + : IsolationLevel.SNAPSHOT; try { this.recordLayer.queryRecords(tenantID, recordSpace, query, isolationLevel, responseObserver); @@ -81,13 +89,14 @@ public void query(RecordStoreProtocol.QueryRequest request, StreamObserver responseObserver) { + public void delete( + RecordStoreProtocol.DeleteRecordRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); @@ -100,9 +109,8 @@ public void delete(RecordStoreProtocol.DeleteRecordRequest request, StreamObserv count = this.recordLayer.deleteRecords(tenantID, recordSpace, query); } - responseObserver.onNext(RecordStoreProtocol.DeleteRecordResponse.newBuilder() - .setDeletedCount(count) - .build()); + responseObserver.onNext( + RecordStoreProtocol.DeleteRecordResponse.newBuilder().setDeletedCount(count).build()); responseObserver.onCompleted(); } catch (RuntimeException e) { log.error(e.getMessage()); @@ -111,17 +119,20 @@ public void delete(RecordStoreProtocol.DeleteRecordRequest request, StreamObserv } 
@Override - public void getQueryPlan(RecordStoreProtocol.QueryRequest request, StreamObserver responseObserver) { + public void getQueryPlan( + RecordStoreProtocol.QueryRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); RecordQuery query = GrpcQueryGenerator.generate(request); try { String queryPlan = this.recordLayer.getQueryPlan(tenantID, recordSpace, query); - responseObserver.onNext(RecordStoreProtocol.GetQueryPlanResponse.newBuilder() - .setQueryPlan(query.toString()) - .setQueryPlan(queryPlan) - .build()); + responseObserver.onNext( + RecordStoreProtocol.GetQueryPlanResponse.newBuilder() + .setQueryPlan(query.toString()) + .setQueryPlan(queryPlan) + .build()); responseObserver.onCompleted(); } catch (RuntimeException e) { log.error(e.getMessage()); diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/SchemaService.java b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/SchemaService.java index 5689a78..650279e 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/SchemaService.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/grpc/SchemaService.java @@ -27,11 +27,10 @@ import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.List; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class SchemaService extends SchemaServiceGrpc.SchemaServiceImplBase { private static final Logger log = LoggerFactory.getLogger(SchemaService.class); @@ -46,33 +45,47 @@ public SchemaService(RecordLayer recordLayer) { * @param responseObserver */ @Override - public void get(RecordStoreProtocol.GetSchemaRequest request, StreamObserver responseObserver) { + public void get( + RecordStoreProtocol.GetSchemaRequest request, + StreamObserver 
responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); try { - List indexes = recordLayer.getIndexes(tenantID, recordSpace); + List indexes = + recordLayer.getIndexes(tenantID, recordSpace); RecordMetaData metadataStore = recordLayer.getSchema(tenantID, recordSpace); List records = - ImmutableMap.of(request.getRecordTypeName(), metadataStore.getRecordMetaData().getRecordType(request.getRecordTypeName())) - .entrySet() - .stream() - .map(e -> RecordStoreProtocol.SchemaDescription.newBuilder() - .setName(e.getKey()) - .addAllIndexes(indexes) - .addPrimaryKeyField(e.getValue().getPrimaryKey().toKeyExpression().getField().getFieldName()) - .setSchema(ProtobufReflectionUtil.protoFileDescriptorSet(e.getValue().getDescriptor())) - .build()) - .collect(Collectors.toList()); - - - responseObserver.onNext(RecordStoreProtocol.GetSchemaResponse.newBuilder() - .setSchemas(records.get(0)) - .setVersion(metadataStore.getRecordMetaData().getVersion()) - .build()); + ImmutableMap.of( + request.getRecordTypeName(), + metadataStore.getRecordMetaData().getRecordType(request.getRecordTypeName())) + .entrySet() + .stream() + .map( + e -> + RecordStoreProtocol.SchemaDescription.newBuilder() + .setName(e.getKey()) + .addAllIndexes(indexes) + .addPrimaryKeyField( + e.getValue() + .getPrimaryKey() + .toKeyExpression() + .getField() + .getFieldName()) + .setSchema( + ProtobufReflectionUtil.protoFileDescriptorSet( + e.getValue().getDescriptor())) + .build()) + .collect(Collectors.toList()); + + responseObserver.onNext( + RecordStoreProtocol.GetSchemaResponse.newBuilder() + .setSchemas(records.get(0)) + .setVersion(metadataStore.getRecordMetaData().getVersion()) + .build()); responseObserver.onCompleted(); } catch (RuntimeException e) { log.error(e.getMessage()); @@ -80,18 +93,20 @@ public void get(RecordStoreProtocol.GetSchemaRequest request, StreamObserver responseObserver) { + public void upsert( + 
RecordStoreProtocol.UpsertSchemaRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); try { - recordLayer.upsertSchema(tenantID, recordSpace, request.getSchema(), request.getRecordTypeIndexDefinitionsList()); + recordLayer.upsertSchema( + tenantID, recordSpace, request.getSchema(), request.getRecordTypeIndexDefinitionsList()); } catch (MetaDataException | Descriptors.DescriptorValidationException e) { log.error(e.getMessage()); throw new StatusRuntimeException(Status.INTERNAL.withDescription(e.getMessage())); @@ -101,22 +116,24 @@ public void upsert(RecordStoreProtocol.UpsertSchemaRequest request, StreamObserv responseObserver.onCompleted(); } - /** * @param request * @param responseObserver */ @Override - public void stat(RecordStoreProtocol.StatRequest request, StreamObserver responseObserver) { + public void stat( + RecordStoreProtocol.StatRequest request, + StreamObserver responseObserver) { String tenantID = GrpcContextKeys.getTenantIDOrFail(); String recordSpace = GrpcContextKeys.getContainerOrFail(); try { Tuple result = recordLayer.getCountAndCountUpdates(tenantID, recordSpace); - responseObserver.onNext(RecordStoreProtocol.StatResponse.newBuilder() - .setCount(result.getLong(0)) - .setCountUpdates(result.getLong(1)) - .build()); + responseObserver.onNext( + RecordStoreProtocol.StatResponse.newBuilder() + .setCount(result.getLong(0)) + .setCountUpdates(result.getLong(1)) + .build()); responseObserver.onCompleted(); } catch (RuntimeException e) { log.error(e.getMessage()); diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/query/GraphQLQueryGenerator.java b/record-store/src/main/java/fr/pierrezemb/recordstore/query/GraphQLQueryGenerator.java index 657084e..d43a83e 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/query/GraphQLQueryGenerator.java +++ 
b/record-store/src/main/java/fr/pierrezemb/recordstore/query/GraphQLQueryGenerator.java @@ -20,8 +20,7 @@ public class GraphQLQueryGenerator { public static RecordQuery generate(DataFetchingEnvironment env) { - RecordQuery.Builder queryBuilder = RecordQuery.newBuilder() - .setRecordType("User"); + RecordQuery.Builder queryBuilder = RecordQuery.newBuilder().setRecordType("User"); return queryBuilder.build(); } } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/query/GrpcQueryGenerator.java b/record-store/src/main/java/fr/pierrezemb/recordstore/query/GrpcQueryGenerator.java index e89e27f..9723aff 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/query/GrpcQueryGenerator.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/query/GrpcQueryGenerator.java @@ -24,27 +24,27 @@ import fr.pierrezemb.recordstore.proto.RecordStoreProtocol; import io.grpc.Status; import io.grpc.StatusRuntimeException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.text.ParseException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class GrpcQueryGenerator { private static final Logger LOGGER = LoggerFactory.getLogger(GrpcQueryGenerator.class); public static RecordQuery generate(RecordStoreProtocol.QueryRequest request) { - RecordQuery.Builder queryBuilder = RecordQuery.newBuilder() - .setRecordType(request.getRecordTypeName()); + RecordQuery.Builder queryBuilder = + RecordQuery.newBuilder().setRecordType(request.getRecordTypeName()); if (request.getFieldsToReturnCount() > 0) { - queryBuilder.setRequiredResults(request.getFieldsToReturnList().asByteStringList() - .stream() - .map(e -> Key.Expressions.field(String.valueOf(e.toString()))).collect(Collectors.toList())); + queryBuilder.setRequiredResults( + request.getFieldsToReturnList().asByteStringList().stream() 
+ .map(e -> Key.Expressions.field(String.valueOf(e.toString()))) + .collect(Collectors.toList())); } if (request.hasSortBy()) { @@ -65,7 +65,8 @@ public static RecordQuery generate(RecordStoreProtocol.QueryRequest request) { queryBuilder.setSort(Key.Expressions.field(request.getSortBy().getField()), true); break; case UNRECOGNIZED: - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("cannot recognize sortBy")); + throw new StatusRuntimeException( + Status.INVALID_ARGUMENT.withDescription("cannot recognize sortBy")); } } @@ -78,8 +79,8 @@ public static RecordQuery generate(RecordStoreProtocol.QueryRequest request) { } public static RecordQuery generate(RecordStoreProtocol.DeleteRecordRequest request) { - RecordQuery.Builder queryBuilder = RecordQuery.newBuilder() - .setRecordType(request.getRecordTypeName()); + RecordQuery.Builder queryBuilder = + RecordQuery.newBuilder().setRecordType(request.getRecordTypeName()); try { QueryComponent queryComponents = parseNode(request.getFilter()); @@ -92,14 +93,14 @@ public static RecordQuery generate(RecordStoreProtocol.DeleteRecordRequest reque return queryBuilder.build(); } - public static QueryComponent parseNode(RecordStoreProtocol.QueryFilterNode node) throws ParseException { + public static QueryComponent parseNode(RecordStoreProtocol.QueryFilterNode node) + throws ParseException { if (node == null) { return null; } switch (node.getContentCase()) { - case FIELD_NODE: return parseFieldNode(node.getFieldNode()); case AND_NODE: @@ -115,20 +116,23 @@ public static QueryComponent parseNode(RecordStoreProtocol.QueryFilterNode node) private static QueryComponent handleMapNode(RecordStoreProtocol.QueryFilterMapNode mapNode) { if (mapNode.hasKey() && !mapNode.hasValue()) { - return Query.field(mapNode.getField()).mapMatches(constructFunctionMatcher(mapNode.getKey()), null); + return Query.field(mapNode.getField()) + .mapMatches(constructFunctionMatcher(mapNode.getKey()), null); } if (!mapNode.hasKey() && 
mapNode.hasValue()) { - return Query.field(mapNode.getField()).mapMatches(null, constructFunctionMatcher(mapNode.getValue())); + return Query.field(mapNode.getField()) + .mapMatches(null, constructFunctionMatcher(mapNode.getValue())); } - return Query.field(mapNode.getField()).mapMatches( - constructFunctionMatcher(mapNode.getKey()), - constructFunctionMatcher(mapNode.getValue()) - ); + return Query.field(mapNode.getField()) + .mapMatches( + constructFunctionMatcher(mapNode.getKey()), + constructFunctionMatcher(mapNode.getValue())); } - private static Function constructFunctionMatcher(RecordStoreProtocol.QueryFilterFieldNode node) { + private static Function constructFunctionMatcher( + RecordStoreProtocol.QueryFilterFieldNode node) { return k -> { try { return switchOnOperations(k, node); @@ -139,8 +143,8 @@ private static Function constructFunctionMatcher(RecordSt }; } - - private static List parseChildrenNodes(RecordStoreProtocol.QueryFilterOrNode node) throws ParseException { + private static List parseChildrenNodes(RecordStoreProtocol.QueryFilterOrNode node) + throws ParseException { List queryComponents = new ArrayList<>(); for (RecordStoreProtocol.QueryFilterNode children : node.getNodesList()) { queryComponents.add(parseNode(children)); @@ -148,7 +152,8 @@ private static List parseChildrenNodes(RecordStoreProtocol.Query return queryComponents; } - private static List parseChildrenNodes(RecordStoreProtocol.QueryFilterAndNode node) throws ParseException { + private static List parseChildrenNodes( + RecordStoreProtocol.QueryFilterAndNode node) throws ParseException { List queryComponents = new ArrayList<>(); for (RecordStoreProtocol.QueryFilterNode children : node.getNodesList()) { queryComponents.add(parseNode(children)); @@ -156,7 +161,8 @@ private static List parseChildrenNodes(RecordStoreProtocol.Query return queryComponents; } - private static QueryComponent parseFieldNode(RecordStoreProtocol.QueryFilterFieldNode node) throws ParseException { + private 
static QueryComponent parseFieldNode(RecordStoreProtocol.QueryFilterFieldNode node) + throws ParseException { if (node == null) { throw new ParseException("node is null", 0); } @@ -166,47 +172,49 @@ private static QueryComponent parseFieldNode(RecordStoreProtocol.QueryFilterFiel return switchOnOperations(temporaryQuery, node); } - private static QueryComponent switchOnOperations(Field temporaryQuery, RecordStoreProtocol.QueryFilterFieldNode node) throws ParseException { + private static QueryComponent switchOnOperations( + Field temporaryQuery, RecordStoreProtocol.QueryFilterFieldNode node) throws ParseException { switch (node.getOperation()) { case GREATER_THAN_OR_EQUALS: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().greaterThanOrEquals(parseValue(node)) : - temporaryQuery.greaterThanOrEquals(parseValue(node)); + return node.getIsFieldDefinedAsRepeated() + ? temporaryQuery.oneOfThem().greaterThanOrEquals(parseValue(node)) + : temporaryQuery.greaterThanOrEquals(parseValue(node)); case LESS_THAN_OR_EQUALS: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().lessThanOrEquals(parseValue(node)) : - temporaryQuery.lessThanOrEquals(parseValue(node)); + return node.getIsFieldDefinedAsRepeated() + ? temporaryQuery.oneOfThem().lessThanOrEquals(parseValue(node)) + : temporaryQuery.lessThanOrEquals(parseValue(node)); case GREATER_THAN: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().greaterThan(parseValue(node)) : - temporaryQuery.greaterThan(parseValue(node)); + return node.getIsFieldDefinedAsRepeated() + ? temporaryQuery.oneOfThem().greaterThan(parseValue(node)) + : temporaryQuery.greaterThan(parseValue(node)); case LESS_THAN: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().lessThan(parseValue(node)) : - temporaryQuery.lessThan(parseValue(node)); + return node.getIsFieldDefinedAsRepeated() + ? 
temporaryQuery.oneOfThem().lessThan(parseValue(node)) + : temporaryQuery.lessThan(parseValue(node)); case START_WITH: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().startsWith(String.valueOf(parseValue(node))) : - temporaryQuery.startsWith(String.valueOf(parseValue(node))); + return node.getIsFieldDefinedAsRepeated() + ? temporaryQuery.oneOfThem().startsWith(String.valueOf(parseValue(node))) + : temporaryQuery.startsWith(String.valueOf(parseValue(node))); case IS_EMPTY: return Query.field(node.getField()).isEmpty(); case IS_NULL: return Query.field(node.getField()).isNull(); case EQUALS: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().equalsValue(parseValue(node)) : - temporaryQuery.equalsValue(parseValue(node)); + return node.getIsFieldDefinedAsRepeated() + ? temporaryQuery.oneOfThem().equalsValue(parseValue(node)) + : temporaryQuery.equalsValue(parseValue(node)); case NOT_EQUALS: - return node.getIsFieldDefinedAsRepeated() ? - temporaryQuery.oneOfThem().notEquals(parseValue(node)) : - temporaryQuery.notEquals(parseValue(node)); + return node.getIsFieldDefinedAsRepeated() + ? 
temporaryQuery.oneOfThem().notEquals(parseValue(node)) + : temporaryQuery.notEquals(parseValue(node)); case NOT_NULL: return Query.field(node.getField()).notNull(); case MATCHES: if (node.getValueCase() != RecordStoreProtocol.QueryFilterFieldNode.ValueCase.FIELDNODE) { throw new ParseException("Matches onl accept a nested FieldValue", 0); } - return Query.field(node.getField()).matches(Objects.requireNonNull(parseFieldNode(node.getFieldNode()))); + return Query.field(node.getField()) + .matches(Objects.requireNonNull(parseFieldNode(node.getFieldNode()))); case TEXT_CONTAINS_ANY: return Query.field(node.getField()).text().containsAny(node.getTokensList()); case TEXT_CONTAINS_ALL: @@ -218,7 +226,8 @@ private static QueryComponent switchOnOperations(Field temporaryQuery, RecordSto } } - private static Object parseValue(RecordStoreProtocol.QueryFilterFieldNode node) throws ParseException { + private static Object parseValue(RecordStoreProtocol.QueryFilterFieldNode node) + throws ParseException { switch (node.getValueCase()) { case STRING_VALUE: return node.getStringValue(); diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoDataFetcher.java b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoDataFetcher.java index 1eb75ed..76e4534 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoDataFetcher.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoDataFetcher.java @@ -23,15 +23,14 @@ import graphql.schema.DataFetchingEnvironment; import graphql.schema.GraphQLEnumType; import graphql.schema.GraphQLType; - import java.lang.reflect.Method; import java.util.Map; final class ProtoDataFetcher implements DataFetcher { private static final Converter UNDERSCORE_TO_CAMEL = - CaseFormat.LOWER_UNDERSCORE.converterTo(CaseFormat.LOWER_CAMEL); + CaseFormat.LOWER_UNDERSCORE.converterTo(CaseFormat.LOWER_CAMEL); private static final Converter LOWER_CAMEL_TO_UPPER = - 
CaseFormat.LOWER_CAMEL.converterTo(CaseFormat.UPPER_CAMEL); + CaseFormat.LOWER_CAMEL.converterTo(CaseFormat.UPPER_CAMEL); private final Descriptors.FieldDescriptor fieldDescriptor; private final String convertedFieldName; @@ -41,7 +40,7 @@ final class ProtoDataFetcher implements DataFetcher { this.fieldDescriptor = fieldDescriptor; final String fieldName = fieldDescriptor.getName(); convertedFieldName = - fieldName.contains("_") ? UNDERSCORE_TO_CAMEL.convert(fieldName) : fieldName; + fieldName.contains("_") ? UNDERSCORE_TO_CAMEL.convert(fieldName) : fieldName; } @Override @@ -66,9 +65,9 @@ public Object get(DataFetchingEnvironment environment) throws Exception { if (method == null) { // no synchronization necessary because this line is idempotent final String methodNameSuffix = - fieldDescriptor.isMapField() ? "Map" : fieldDescriptor.isRepeated() ? "List" : ""; + fieldDescriptor.isMapField() ? "Map" : fieldDescriptor.isRepeated() ? "List" : ""; final String methodName = - "get" + LOWER_CAMEL_TO_UPPER.convert(convertedFieldName) + methodNameSuffix; + "get" + LOWER_CAMEL_TO_UPPER.convert(convertedFieldName) + methodNameSuffix; method = source.getClass().getMethod(methodName); } return method.invoke(source); diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoScalars.java b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoScalars.java index ad3fbe1..a381ae6 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoScalars.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoScalars.java @@ -36,98 +36,97 @@ public final class ProtoScalars { public static final GraphQLScalarType UINT_32 = - GraphQLScalarType.newScalar(Scalars.GraphQLInt) - .name("UInt32") - .description("Scalar for proto type uint32. Uses variable-length encoding.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLInt) + .name("UInt32") + .description("Scalar for proto type uint32. 
Uses variable-length encoding.") + .build(); public static final GraphQLScalarType UINT_64 = - GraphQLScalarType.newScalar(Scalars.GraphQLLong) - .name("UInt64") - .description("Scalar for proto type uint64. Uses variable-length encoding.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLLong) + .name("UInt64") + .description("Scalar for proto type uint64. Uses variable-length encoding.") + .build(); public static final GraphQLScalarType SINT_32 = - GraphQLScalarType.newScalar(Scalars.GraphQLInt) - .name("SInt32") - .description( - "Scalar for proto type sint32. Uses variable-length encoding." - + " Signed int value. These more efficiently encode negative numbers than regular int32s.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLInt) + .name("SInt32") + .description( + "Scalar for proto type sint32. Uses variable-length encoding." + + " Signed int value. These more efficiently encode negative numbers than regular int32s.") + .build(); public static final GraphQLScalarType SINT_64 = - GraphQLScalarType.newScalar(Scalars.GraphQLLong) - .name("SInt64") - .description( - "Scalar for proto type sint64. Uses variable-length encoding. Signed int value." - + " These more efficiently encode negative numbers than regular int64s.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLLong) + .name("SInt64") + .description( + "Scalar for proto type sint64. Uses variable-length encoding. Signed int value." + + " These more efficiently encode negative numbers than regular int64s.") + .build(); public static final GraphQLScalarType FIXED_32 = - GraphQLScalarType.newScalar(Scalars.GraphQLLong) - .name("Fixed32") - .description( - "Scalar for proto type fixed32. Always four bytes." - + " More efficient than uint32 if values are often greater than 2^28.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLLong) + .name("Fixed32") + .description( + "Scalar for proto type fixed32. Always four bytes." 
+ + " More efficient than uint32 if values are often greater than 2^28.") + .build(); public static final GraphQLScalarType FIXED_64 = - GraphQLScalarType.newScalar(Scalars.GraphQLLong) - .name("Fixed64") - .description( - "Scalar for proto type fixed64. Always eight bytes." - + " More efficient than uint64 if values are often greater than 2^56.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLLong) + .name("Fixed64") + .description( + "Scalar for proto type fixed64. Always eight bytes." + + " More efficient than uint64 if values are often greater than 2^56.") + .build(); public static final GraphQLScalarType S_FIXED_32 = - GraphQLScalarType.newScalar(Scalars.GraphQLInt) - .name("SFixed32") - .description("Scalar for proto type sfixed32. Always four bytes.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLInt) + .name("SFixed32") + .description("Scalar for proto type sfixed32. Always four bytes.") + .build(); public static final GraphQLScalarType S_FIXED_64 = - GraphQLScalarType.newScalar(Scalars.GraphQLLong) - .name("SFixed64") - .description("Scalar for proto type sfixed64. Always eight bytes.") - .build(); + GraphQLScalarType.newScalar(Scalars.GraphQLLong) + .name("SFixed64") + .description("Scalar for proto type sfixed64. 
Always eight bytes.") + .build(); public static final GraphQLScalarType BYTES = - GraphQLScalarType.newScalar() - .coercing( - new Coercing() { - @Override - public ByteString serialize(Object dataFetcherResult) - throws CoercingSerializeException { - if (dataFetcherResult instanceof ByteString) { - return (ByteString) dataFetcherResult; - } else { - throw new CoercingSerializeException( - "Invalid value '" + dataFetcherResult + "' for Bytes"); - } - } + GraphQLScalarType.newScalar() + .coercing( + new Coercing() { + @Override + public ByteString serialize(Object dataFetcherResult) + throws CoercingSerializeException { + if (dataFetcherResult instanceof ByteString) { + return (ByteString) dataFetcherResult; + } else { + throw new CoercingSerializeException( + "Invalid value '" + dataFetcherResult + "' for Bytes"); + } + } - @Override - public ByteString parseValue(Object input) throws CoercingParseValueException { - if (input instanceof String) { - ByteString result = ByteString.copyFromUtf8((String) input); - if (result == null) { - throw new CoercingParseValueException( - "Invalid value '" + input + "' for Bytes"); - } - return result; - } - if (input instanceof ByteString) { - return (ByteString) input; - } - throw new CoercingParseValueException("Invalid value '" + input + "' for Bytes"); - } + @Override + public ByteString parseValue(Object input) throws CoercingParseValueException { + if (input instanceof String) { + ByteString result = ByteString.copyFromUtf8((String) input); + if (result == null) { + throw new CoercingParseValueException( + "Invalid value '" + input + "' for Bytes"); + } + return result; + } + if (input instanceof ByteString) { + return (ByteString) input; + } + throw new CoercingParseValueException("Invalid value '" + input + "' for Bytes"); + } - @Override - public ByteString parseLiteral(Object input) throws CoercingParseLiteralException { - if (input instanceof StringValue) { - return ByteString.copyFromUtf8(((StringValue) 
input).getValue()); - } - return null; - } - }) - .name("Bytes") - .description( - "Scalar for proto type bytes." - + " May contain any arbitrary sequence of bytes no longer than 2^32.") - .build(); + @Override + public ByteString parseLiteral(Object input) throws CoercingParseLiteralException { + if (input instanceof StringValue) { + return ByteString.copyFromUtf8(((StringValue) input).getValue()); + } + return null; + } + }) + .name("Bytes") + .description( + "Scalar for proto type bytes." + + " May contain any arbitrary sequence of bytes no longer than 2^32.") + .build(); - private ProtoScalars() { - } + private ProtoScalars() {} } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoToGql.java b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoToGql.java index ac2b12e..e9cb5e4 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoToGql.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/graphql/ProtoToGql.java @@ -13,22 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package fr.pierrezemb.recordstore.utils.graphql; +import static com.google.common.collect.ImmutableList.toImmutableList; +import static graphql.Scalars.GraphQLString; +import static graphql.schema.GraphQLFieldDefinition.newFieldDefinition; + import com.google.common.base.CharMatcher; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -48,78 +38,69 @@ import graphql.schema.GraphQLScalarType; import graphql.schema.GraphQLTypeReference; -import static com.google.common.collect.ImmutableList.toImmutableList; -import static graphql.Scalars.GraphQLString; -import static graphql.schema.GraphQLFieldDefinition.newFieldDefinition; - -/** - * Converts Protos to GraphQL Types. - */ +/** Converts Protos to GraphQL Types. */ public final class ProtoToGql { private static final ImmutableMap PROTO_TYPE_MAP = - new ImmutableMap.Builder() - .put(Type.BOOL, Scalars.GraphQLBoolean) - .put(Type.FLOAT, Scalars.GraphQLFloat) - .put(Type.INT32, Scalars.GraphQLInt) - .put(Type.INT64, Scalars.GraphQLLong) - .put(Type.STRING, Scalars.GraphQLString) - .put(Type.DOUBLE, Scalars.GraphQLFloat) - .put(Type.UINT32, ProtoScalars.UINT_32) - .put(Type.UINT64, ProtoScalars.UINT_64) - .put(Type.SINT32, ProtoScalars.SINT_32) - .put(Type.SINT64, ProtoScalars.SINT_64) - .put(Type.BYTES, ProtoScalars.BYTES) - .put(Type.FIXED32, ProtoScalars.FIXED_32) - .put(Type.FIXED64, ProtoScalars.FIXED_64) - .put(Type.SFIXED32, ProtoScalars.S_FIXED_32) - .put(Type.SFIXED64, ProtoScalars.S_FIXED_64) - .build(); + new ImmutableMap.Builder() + .put(Type.BOOL, Scalars.GraphQLBoolean) + .put(Type.FLOAT, Scalars.GraphQLFloat) + .put(Type.INT32, Scalars.GraphQLInt) + .put(Type.INT64, Scalars.GraphQLLong) + .put(Type.STRING, Scalars.GraphQLString) + .put(Type.DOUBLE, Scalars.GraphQLFloat) + .put(Type.UINT32, ProtoScalars.UINT_32) + .put(Type.UINT64, ProtoScalars.UINT_64) + .put(Type.SINT32, ProtoScalars.SINT_32) + .put(Type.SINT64, ProtoScalars.SINT_64) + .put(Type.BYTES, 
ProtoScalars.BYTES) + .put(Type.FIXED32, ProtoScalars.FIXED_32) + .put(Type.FIXED64, ProtoScalars.FIXED_64) + .put(Type.SFIXED32, ProtoScalars.S_FIXED_32) + .put(Type.SFIXED64, ProtoScalars.S_FIXED_64) + .build(); private static final ImmutableMap TYPE_MAP = - new ImmutableMap.Builder() - .put(Type.BOOL, Scalars.GraphQLBoolean) - .put(Type.FLOAT, Scalars.GraphQLFloat) - .put(Type.INT32, Scalars.GraphQLInt) - .put(Type.INT64, Scalars.GraphQLLong) - .put(Type.STRING, Scalars.GraphQLString) - .put(Type.DOUBLE, Scalars.GraphQLFloat) - .put(Type.UINT32, Scalars.GraphQLInt) - .put(Type.UINT64, Scalars.GraphQLLong) - .put(Type.SINT32, Scalars.GraphQLInt) - .put(Type.SINT64, Scalars.GraphQLLong) - .put(Type.BYTES, Scalars.GraphQLString) - .put(Type.FIXED32, Scalars.GraphQLInt) - .put(Type.FIXED64, Scalars.GraphQLLong) - .put(Type.SFIXED32, Scalars.GraphQLInt) - .put(Type.SFIXED64, Scalars.GraphQLLong) - .build(); + new ImmutableMap.Builder() + .put(Type.BOOL, Scalars.GraphQLBoolean) + .put(Type.FLOAT, Scalars.GraphQLFloat) + .put(Type.INT32, Scalars.GraphQLInt) + .put(Type.INT64, Scalars.GraphQLLong) + .put(Type.STRING, Scalars.GraphQLString) + .put(Type.DOUBLE, Scalars.GraphQLFloat) + .put(Type.UINT32, Scalars.GraphQLInt) + .put(Type.UINT64, Scalars.GraphQLLong) + .put(Type.SINT32, Scalars.GraphQLInt) + .put(Type.SINT64, Scalars.GraphQLLong) + .put(Type.BYTES, Scalars.GraphQLString) + .put(Type.FIXED32, Scalars.GraphQLInt) + .put(Type.FIXED64, Scalars.GraphQLLong) + .put(Type.SFIXED32, Scalars.GraphQLInt) + .put(Type.SFIXED64, Scalars.GraphQLLong) + .build(); private static final ImmutableList STATIC_FIELD = - ImmutableList.of(newFieldDefinition().type(GraphQLString).name("_").staticValue("-").build()); + ImmutableList.of(newFieldDefinition().type(GraphQLString).name("_").staticValue("-").build()); - private ProtoToGql() { - } + private ProtoToGql() {} public static GraphQLFieldDefinition convertField( - FieldDescriptor fieldDescriptor, SchemaOptions schemaOptions) { + 
FieldDescriptor fieldDescriptor, SchemaOptions schemaOptions) { DataFetcher dataFetcher = new ProtoDataFetcher(fieldDescriptor); GraphQLFieldDefinition.Builder builder = - newFieldDefinition() - .type(convertType(fieldDescriptor, schemaOptions)) - .dataFetcher(dataFetcher) - .name(fieldDescriptor.getJsonName()); + newFieldDefinition() + .type(convertType(fieldDescriptor, schemaOptions)) + .dataFetcher(dataFetcher) + .name(fieldDescriptor.getJsonName()); builder.description(schemaOptions.commentsMap().get(fieldDescriptor.getFullName())); if (fieldDescriptor.getOptions().hasDeprecated() - && fieldDescriptor.getOptions().getDeprecated()) { + && fieldDescriptor.getOptions().getDeprecated()) { builder.deprecate("deprecated in proto"); } return builder.build(); } - /** - * Returns a GraphQLOutputType generated from a FieldDescriptor. - */ + /** Returns a GraphQLOutputType generated from a FieldDescriptor. */ public static GraphQLOutputType convertType( - FieldDescriptor fieldDescriptor, SchemaOptions schemaOptions) { + FieldDescriptor fieldDescriptor, SchemaOptions schemaOptions) { final GraphQLOutputType type; if (fieldDescriptor.getType() == Type.MESSAGE) { @@ -145,44 +126,37 @@ public static GraphQLOutputType convertType( } } - public static GraphQLObjectType convert( - Descriptor descriptor, - SchemaOptions schemaOptions) { + public static GraphQLObjectType convert(Descriptor descriptor, SchemaOptions schemaOptions) { ImmutableList graphQLFieldDefinitions = - descriptor.getFields().stream() - .map(field -> ProtoToGql.convertField(field, schemaOptions)) - .collect(toImmutableList()); + descriptor.getFields().stream() + .map(field -> ProtoToGql.convertField(field, schemaOptions)) + .collect(toImmutableList()); return GraphQLObjectType.newObject() - .name(getReferenceName(descriptor)) - .description(schemaOptions.commentsMap().get(descriptor.getFullName())) - .fields(graphQLFieldDefinitions.isEmpty() ? 
STATIC_FIELD : graphQLFieldDefinitions) - .build(); + .name(getReferenceName(descriptor)) + .description(schemaOptions.commentsMap().get(descriptor.getFullName())) + .fields(graphQLFieldDefinitions.isEmpty() ? STATIC_FIELD : graphQLFieldDefinitions) + .build(); } - static GraphQLEnumType convert( - EnumDescriptor descriptor, SchemaOptions schemaOptions) { + static GraphQLEnumType convert(EnumDescriptor descriptor, SchemaOptions schemaOptions) { GraphQLEnumType.Builder builder = GraphQLEnumType.newEnum().name(getReferenceName(descriptor)); for (EnumValueDescriptor value : descriptor.getValues()) { builder.value( - value.getName(), - value.getName(), - schemaOptions.commentsMap().get(value.getFullName()), - value.getOptions().getDeprecated() ? "deprecated in proto" : null); + value.getName(), + value.getName(), + schemaOptions.commentsMap().get(value.getFullName()), + value.getOptions().getDeprecated() ? "deprecated in proto" : null); } return builder.build(); } - /** - * Returns the GraphQL name of the supplied proto. - */ + /** Returns the GraphQL name of the supplied proto. */ static String getReferenceName(GenericDescriptor descriptor) { return CharMatcher.anyOf(".").replaceFrom(descriptor.getFullName(), "_"); } - /** - * Returns a reference to the GraphQL type corresponding to the supplied proto. - */ + /** Returns a reference to the GraphQL type corresponding to the supplied proto. 
*/ static GraphQLTypeReference getReference(GenericDescriptor descriptor) { return new GraphQLTypeReference(getReferenceName(descriptor)); } diff --git a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/protobuf/ProtobufReflectionUtil.java b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/protobuf/ProtobufReflectionUtil.java index 9be822a..82406bd 100644 --- a/record-store/src/main/java/fr/pierrezemb/recordstore/utils/protobuf/ProtobufReflectionUtil.java +++ b/record-store/src/main/java/fr/pierrezemb/recordstore/utils/protobuf/ProtobufReflectionUtil.java @@ -21,25 +21,23 @@ import com.google.protobuf.Descriptors.FileDescriptor; import com.google.protobuf.Message; import com.google.protobuf.Parser; - import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.HashSet; import java.util.Set; public final class ProtobufReflectionUtil { - private ProtobufReflectionUtil() { - } + private ProtobufReflectionUtil() {} @SuppressWarnings("unchecked") public static Parser protobufParser(Class messageClass) { Object parser = getParserFromGeneratedMessage(messageClass); if (!(parser instanceof Parser)) { throw new IllegalStateException( - "was expecting a protobuf parser to be return from the static parser() method on the type " - + messageClass - + " but instead got " - + parser); + "was expecting a protobuf parser to be return from the static parser() method on the type " + + messageClass + + " but instead got " + + parser); } return (Parser) parser; } @@ -56,16 +54,14 @@ public static FileDescriptorSet protoFileDescriptorSet(Descriptor descriptor) { return fileDescriptorSet.build(); } - /** - * extract the {@linkplain Descriptor} for the generated message type. - */ + /** extract the {@linkplain Descriptor} for the generated message type. 
*/ static Descriptor protobufDescriptor(Class type) { try { Method getDescriptor = type.getDeclaredMethod("getDescriptor"); return (Descriptor) getDescriptor.invoke(type); } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { throw new IllegalStateException( - "unable to obtain protobuf type fileDescriptorSet for " + type, e); + "unable to obtain protobuf type fileDescriptorSet for " + type, e); } } @@ -83,7 +79,7 @@ private static Object getParserFromGeneratedMessage(Class } private static void addDependenciesRecursively( - Set visited, FileDescriptor descriptor) { + Set visited, FileDescriptor descriptor) { for (FileDescriptor dependency : descriptor.getDependencies()) { if (visited.add(dependency)) { addDependenciesRecursively(visited, dependency.getFile()); diff --git a/record-store/src/main/proto/managed_kv.proto b/record-store/src/main/proto/managed_kv.proto new file mode 100644 index 0000000..16b4b2a --- /dev/null +++ b/record-store/src/main/proto/managed_kv.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package managed_kv; + +option java_package = "fr.pierrezemb.recordstore.proto.managed.kv"; +option java_outer_classname = "ManagedKVProto"; + +service ManagedKV { + rpc put(KeyValue) returns (EmptyResponse); + rpc delete(DeleteRequest) returns (EmptyResponse); + rpc scan(ScanRequest) returns (stream KeyValue); +} + +// gRPC part +message EmptyResponse {} + +message DeleteRequest { + bytes key_to_delete = 1; +} + +message ScanRequest { + bytes start_key = 1; + bytes end_key = 2; +} + +// Record-layer part +message KeyValue { + bytes key = 1; + bytes value = 2; +} + +message RecordTypeUnion { + KeyValue _KeyValue = 1; +} diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/GraphQLVerticleTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/GraphQLVerticleTest.java index 1726c72..81a08a5 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/GraphQLVerticleTest.java +++ 
b/record-store/src/test/java/fr/pierrezemb/recordstore/GraphQLVerticleTest.java @@ -15,6 +15,10 @@ */ package fr.pierrezemb.recordstore; +import static fr.pierrezemb.recordstore.datasets.DatasetsLoader.DEFAULT_DEMO_TENANT; +import static io.vertx.junit5.web.TestRequest.bodyResponse; +import static io.vertx.junit5.web.TestRequest.testRequest; + import com.apple.foundationdb.record.RecordMetaData; import fr.pierrezemb.recordstore.auth.BiscuitManager; import fr.pierrezemb.recordstore.datasets.DatasetsLoader; @@ -31,6 +35,11 @@ import io.vertx.junit5.VertxTestContext; import io.vertx.junit5.web.VertxWebClientExtension; import io.vertx.junit5.web.WebClientOptionsInject; +import java.io.File; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import javax.crypto.spec.SecretKeySpec; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -38,49 +47,43 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.testcontainers.containers.AbstractFDBContainer; -import javax.crypto.spec.SecretKeySpec; -import java.io.File; -import java.util.Collections; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -import static fr.pierrezemb.recordstore.datasets.DatasetsLoader.DEFAULT_DEMO_TENANT; -import static io.vertx.junit5.web.TestRequest.bodyResponse; -import static io.vertx.junit5.web.TestRequest.testRequest; - -@ExtendWith({ - VertxExtension.class, - VertxWebClientExtension.class -}) +@ExtendWith({VertxExtension.class, VertxWebClientExtension.class}) @TestInstance(TestInstance.Lifecycle.PER_CLASS) class GraphQLVerticleTest extends AbstractFDBContainer { public final int port = PortManager.nextFreePort(); + @WebClientOptionsInject - public WebClientOptions opts = new WebClientOptions() - .setDefaultPort(port) - .setDefaultHost("localhost"); + public WebClientOptions opts = + new 
WebClientOptions().setDefaultPort(port).setDefaultHost("localhost"); + private File clusterFile; private RecordLayer recordLayer; @BeforeAll - void deploy_verticle(Vertx vertx, VertxTestContext testContext) throws InterruptedException, TimeoutException, ExecutionException { + void deploy_verticle(Vertx vertx, VertxTestContext testContext) + throws InterruptedException, TimeoutException, ExecutionException { clusterFile = container.getClusterFile(); SecretKeySpec secretKey = new SecretKeySpec(Constants.CONFIG_ENCRYPTION_KEY.getBytes(), "AES"); - recordLayer = new RecordLayer(clusterFile.getAbsolutePath(), vertx.isMetricsEnabled(), secretKey); + recordLayer = + new RecordLayer(clusterFile.getAbsolutePath(), vertx.isMetricsEnabled(), secretKey); - DeploymentOptions options = new DeploymentOptions() - .setConfig(new JsonObject() - .put(Constants.CONFIG_LOAD_DEMO, "User") - .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) - .put(Constants.CONFIG_GRAPHQL_LISTEN_PORT, port)); + DeploymentOptions options = + new DeploymentOptions() + .setConfig( + new JsonObject() + .put(Constants.CONFIG_LOAD_DEMO, "User") + .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) + .put(Constants.CONFIG_GRAPHQL_LISTEN_PORT, port)); BiscuitManager biscuitManager = new BiscuitManager(); - String sealedBiscuit = biscuitManager.create(DatasetsLoader.DEFAULT_DEMO_TENANT, Collections.emptyList()); + String sealedBiscuit = + biscuitManager.create(DatasetsLoader.DEFAULT_DEMO_TENANT, Collections.emptyList()); System.out.println(sealedBiscuit); // deploy verticle - vertx.deployVerticle(new GraphQLVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); + vertx.deployVerticle( + new GraphQLVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); } @Test @@ -88,10 +91,12 @@ public void getSchema(WebClient client, VertxTestContext testContext) throws Exc RecordMetaData metadata = 
this.recordLayer.getSchema(DEFAULT_DEMO_TENANT, "USER"); String schema = GraphQLSchemaGenerator.generate(metadata); System.out.println(schema); - testRequest(client, HttpMethod.GET, "/api/v0/" + DatasetsLoader.DEFAULT_DEMO_TENANT + "/" + "USER" + "/schema") - .expect( - bodyResponse(Buffer.buffer(schema), "text/plain") - ).send(testContext); + testRequest( + client, + HttpMethod.GET, + "/api/v0/" + DatasetsLoader.DEFAULT_DEMO_TENANT + "/" + "USER" + "/schema") + .expect(bodyResponse(Buffer.buffer(schema), "text/plain")) + .send(testContext); } @AfterAll diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTest.java index e579e63..50f41b3 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTest.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTest.java @@ -15,6 +15,8 @@ */ package fr.pierrezemb.recordstore; +import static org.junit.Assert.assertEquals; + import com.google.protobuf.DescriptorProtos; import com.google.protobuf.InvalidProtocolBufferException; import fr.pierrezemb.recordstore.auth.BiscuitClientCredential; @@ -31,6 +33,10 @@ import io.vertx.grpc.VertxChannelBuilder; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -38,13 +44,6 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.testcontainers.containers.AbstractFDBContainer; -import java.io.File; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; - @ExtendWith(VertxExtension.class) @TestInstance(TestInstance.Lifecycle.PER_CLASS) public class GrpcVerticleTest extends AbstractFDBContainer { @@ -61,294 +60,352 @@ 
void deploy_verticle(Vertx vertx, VertxTestContext testContext) { clusterFile = container.getClusterFile(); - DeploymentOptions options = new DeploymentOptions() - .setConfig(new JsonObject() - .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) - .put(Constants.CONFIG_GRPC_LISTEN_PORT, port)); + DeploymentOptions options = + new DeploymentOptions() + .setConfig( + new JsonObject() + .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) + .put(Constants.CONFIG_GRPC_LISTEN_PORT, port)); BiscuitManager biscuitManager = new BiscuitManager(); String sealedBiscuit = biscuitManager.create(DEFAULT_TENANT, Collections.emptyList()); System.out.println(sealedBiscuit); - BiscuitClientCredential credentials = new BiscuitClientCredential(DEFAULT_TENANT, sealedBiscuit, this.getClass().getName()); + BiscuitClientCredential credentials = + new BiscuitClientCredential(DEFAULT_TENANT, sealedBiscuit, this.getClass().getName()); // deploy verticle - vertx.deployVerticle(new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); - ManagedChannel channel = VertxChannelBuilder - .forAddress(vertx, "localhost", port) - .usePlaintext(true) - .build(); - - schemaServiceVertxStub = SchemaServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); - recordServiceVertxStub = RecordServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); + vertx.deployVerticle( + new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); + ManagedChannel channel = + VertxChannelBuilder.forAddress(vertx, "localhost", port).usePlaintext(true).build(); + + schemaServiceVertxStub = + SchemaServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); + recordServiceVertxStub = + RecordServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); } @Test public void testCreateSchema(Vertx vertx, VertxTestContext testContext) throws Exception { DescriptorProtos.FileDescriptorSet dependencies = - 
ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); - - RecordStoreProtocol.UpsertSchemaRequest request = RecordStoreProtocol.UpsertSchemaRequest - .newBuilder() - .setSchema(dependencies) - .addRecordTypeIndexDefinitions( - RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName("User") - .addPrimaryKeyFields("id") - .addIndexDefinitions(RecordStoreProtocol.IndexDefinition.newBuilder() - .setIndexType(RecordStoreProtocol.IndexType.VERSION) - .build()) - .build() - ) - .build(); - - schemaServiceVertxStub.upsert(request, response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); + + RecordStoreProtocol.UpsertSchemaRequest request = + RecordStoreProtocol.UpsertSchemaRequest.newBuilder() + .setSchema(dependencies) + .addRecordTypeIndexDefinitions( + RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() + .setName("User") + .addPrimaryKeyFields("id") + .addIndexDefinitions( + RecordStoreProtocol.IndexDefinition.newBuilder() + .setIndexType(RecordStoreProtocol.IndexType.VERSION) + .build()) + .build()) + .build(); + + schemaServiceVertxStub.upsert( + request, + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testPut1(Vertx vertx, VertxTestContext testContext) throws Exception { - DemoUserProto.User person = DemoUserProto.User.newBuilder() - .setId(1) - .setName("PierreZ") - .setEmail("toto@example.com") - .build(); - - RecordStoreProtocol.PutRecordRequest request = RecordStoreProtocol.PutRecordRequest.newBuilder() - .setRecordTypeName("User") - .setMessage(person.toByteString()) - .build(); - - 
recordServiceVertxStub.put(request, response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + DemoUserProto.User person = + DemoUserProto.User.newBuilder() + .setId(1) + .setName("PierreZ") + .setEmail("toto@example.com") + .build(); + + RecordStoreProtocol.PutRecordRequest request = + RecordStoreProtocol.PutRecordRequest.newBuilder() + .setRecordTypeName("User") + .setMessage(person.toByteString()) + .build(); + + recordServiceVertxStub.put( + request, + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testPut2(Vertx vertx, VertxTestContext testContext) throws Exception { - RecordStoreProtocol.StatRequest recordRequest = RecordStoreProtocol.StatRequest.newBuilder() - .build(); - - schemaServiceVertxStub.stat(recordRequest, response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - System.out.println("there is " + response.result().getCount() + " records"); - System.out.println("there is " + response.result().getCountUpdates() + " updates"); - assertEquals(1, response.result().getCount()); - assertEquals(1, response.result().getCountUpdates()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + RecordStoreProtocol.StatRequest recordRequest = + RecordStoreProtocol.StatRequest.newBuilder().build(); + + schemaServiceVertxStub.stat( + recordRequest, + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + System.out.println("there is " + response.result().getCount() + " records"); + System.out.println("there is " + response.result().getCountUpdates() + " updates"); + assertEquals(1, 
response.result().getCount()); + assertEquals(1, response.result().getCountUpdates()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testPut3(Vertx vertx, VertxTestContext testContext) throws Exception { - RecordStoreProtocol.QueryFilterNode query = RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(2) - .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) - .build()) - .build(); - - RecordStoreProtocol.QueryRequest request = RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(query) - .build(); - - recordServiceVertxStub.query(request, response -> { - response.handler(req -> { - System.out.println("received a response"); - DemoUserProto.User p = null; - try { - p = DemoUserProto.User.parseFrom(req.getRecord()); - assertEquals("PierreZ", p.getName()); - assertEquals("toto@example.com", p.getEmail()); - assertEquals(1, p.getId()); - } catch (InvalidProtocolBufferException e) { - testContext.failNow(e); - e.printStackTrace(); - } - }); - response.endHandler(end -> testContext.completeNow()); - response.exceptionHandler(testContext::failNow); - }); + RecordStoreProtocol.QueryFilterNode query = + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(2) + .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) + .build()) + .build(); + + RecordStoreProtocol.QueryRequest request = + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter(query) + .build(); + + recordServiceVertxStub.query( + request, + response -> { + response.handler( + req -> { + System.out.println("received a response"); + DemoUserProto.User p = null; + try { + p = DemoUserProto.User.parseFrom(req.getRecord()); + assertEquals("PierreZ", 
p.getName()); + assertEquals("toto@example.com", p.getEmail()); + assertEquals(1, p.getId()); + } catch (InvalidProtocolBufferException e) { + testContext.failNow(e); + e.printStackTrace(); + } + }); + response.endHandler(end -> testContext.completeNow()); + response.exceptionHandler(testContext::failNow); + }); } @Test public void testPut4(Vertx vertx, VertxTestContext testContext) throws Exception { - RecordStoreProtocol.QueryFilterAndNode andNode = RecordStoreProtocol.QueryFilterAndNode.newBuilder() - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(2) - .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) - .build()).build()) - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(1) - .setOperation(RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) - .build()).build()) - .build(); - - RecordStoreProtocol.QueryFilterNode query = RecordStoreProtocol.QueryFilterNode.newBuilder() - .setAndNode(andNode) - .build(); - - RecordStoreProtocol.QueryRequest request = RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(query) - .build(); - - recordServiceVertxStub.query(request, response -> { - response.handler(req -> { - System.out.println("received a response"); - DemoUserProto.User p = null; - try { - p = DemoUserProto.User.parseFrom(req.getRecord()); - assertEquals("PierreZ", p.getName()); - assertEquals("toto@example.com", p.getEmail()); - assertEquals(1, p.getId()); - } catch (InvalidProtocolBufferException e) { - testContext.failNow(e); - e.printStackTrace(); - } - }); - response.endHandler(end -> testContext.completeNow()); - response.exceptionHandler(testContext::failNow); - }); + RecordStoreProtocol.QueryFilterAndNode andNode = + 
RecordStoreProtocol.QueryFilterAndNode.newBuilder() + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(2) + .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) + .build()) + .build()) + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(1) + .setOperation( + RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) + .build()) + .build()) + .build(); + + RecordStoreProtocol.QueryFilterNode query = + RecordStoreProtocol.QueryFilterNode.newBuilder().setAndNode(andNode).build(); + + RecordStoreProtocol.QueryRequest request = + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter(query) + .build(); + + recordServiceVertxStub.query( + request, + response -> { + response.handler( + req -> { + System.out.println("received a response"); + DemoUserProto.User p = null; + try { + p = DemoUserProto.User.parseFrom(req.getRecord()); + assertEquals("PierreZ", p.getName()); + assertEquals("toto@example.com", p.getEmail()); + assertEquals(1, p.getId()); + } catch (InvalidProtocolBufferException e) { + testContext.failNow(e); + e.printStackTrace(); + } + }); + response.endHandler(end -> testContext.completeNow()); + response.exceptionHandler(testContext::failNow); + }); } @Test public void testPut5(Vertx vertx, VertxTestContext testContext) throws Exception { - RecordStoreProtocol.QueryFilterAndNode andNode = RecordStoreProtocol.QueryFilterAndNode.newBuilder() - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(2) - .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) - .build()).build()) - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - 
.setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(1) - .setOperation(RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) - .build()).build()) - .build(); - - RecordStoreProtocol.QueryFilterNode query = RecordStoreProtocol.QueryFilterNode.newBuilder() - .setAndNode(andNode) - .build(); - - RecordStoreProtocol.DeleteRecordRequest request = RecordStoreProtocol.DeleteRecordRequest.newBuilder() - .setFilter(query) - .setRecordTypeName("User") - .build(); - - recordServiceVertxStub.delete(request, response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - assertEquals(1, response.result().getDeletedCount()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + RecordStoreProtocol.QueryFilterAndNode andNode = + RecordStoreProtocol.QueryFilterAndNode.newBuilder() + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(2) + .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) + .build()) + .build()) + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(1) + .setOperation( + RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) + .build()) + .build()) + .build(); + + RecordStoreProtocol.QueryFilterNode query = + RecordStoreProtocol.QueryFilterNode.newBuilder().setAndNode(andNode).build(); + + RecordStoreProtocol.DeleteRecordRequest request = + RecordStoreProtocol.DeleteRecordRequest.newBuilder() + .setFilter(query) + .setRecordTypeName("User") + .build(); + + recordServiceVertxStub.delete( + request, + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + assertEquals(1, 
response.result().getDeletedCount()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testPut6(Vertx vertx, VertxTestContext testContext) throws Exception { - RecordStoreProtocol.QueryFilterAndNode andNode = RecordStoreProtocol.QueryFilterAndNode.newBuilder() - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(2) - .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) - .build()).build()) - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(1) - .setOperation(RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) - .build()).build()) - .build(); - - RecordStoreProtocol.QueryFilterNode query = RecordStoreProtocol.QueryFilterNode.newBuilder() - .setAndNode(andNode) - .build(); - - RecordStoreProtocol.QueryRequest request = RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(query) - .build(); - - recordServiceVertxStub.query(request, response -> { - List results = new ArrayList<>(); - response.handler(req -> { - System.out.println("received a response"); - DemoUserProto.User p = null; - try { - p = DemoUserProto.User.parseFrom(req.getRecord()); - results.add(p); - } catch (InvalidProtocolBufferException e) { - testContext.failNow(e); - e.printStackTrace(); - } - }); - response.endHandler(end -> { - assertEquals(0, results.size()); - testContext.completeNow(); - }); - response.exceptionHandler(testContext::failNow); - }); + RecordStoreProtocol.QueryFilterAndNode andNode = + RecordStoreProtocol.QueryFilterAndNode.newBuilder() + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(2) + 
.setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) + .build()) + .build()) + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(1) + .setOperation( + RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) + .build()) + .build()) + .build(); + + RecordStoreProtocol.QueryFilterNode query = + RecordStoreProtocol.QueryFilterNode.newBuilder().setAndNode(andNode).build(); + + RecordStoreProtocol.QueryRequest request = + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter(query) + .build(); + + recordServiceVertxStub.query( + request, + response -> { + List results = new ArrayList<>(); + response.handler( + req -> { + System.out.println("received a response"); + DemoUserProto.User p = null; + try { + p = DemoUserProto.User.parseFrom(req.getRecord()); + results.add(p); + } catch (InvalidProtocolBufferException e) { + testContext.failNow(e); + e.printStackTrace(); + } + }); + response.endHandler( + end -> { + assertEquals(0, results.size()); + testContext.completeNow(); + }); + response.exceptionHandler(testContext::failNow); + }); } @Test public void testPut7(Vertx vertx, VertxTestContext testContext) throws Exception { - RecordStoreProtocol.QueryRequest request = RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setSortBy(RecordStoreProtocol.SortByRequest.newBuilder().setType(RecordStoreProtocol.SortByType.SORT_BY_NEWEST_VERSION_FIRST) - .build()) - .build(); - - recordServiceVertxStub.query(request, response -> { - List results = new ArrayList<>(); - response.handler(req -> { - System.out.println("received a response"); - DemoUserProto.User p = null; - try { - p = DemoUserProto.User.parseFrom(req.getRecord()); - results.add(p); - } catch (InvalidProtocolBufferException e) { - testContext.failNow(e); - e.printStackTrace(); - } - }); - response.endHandler(end -> { - 
assertEquals(0, results.size()); - testContext.completeNow(); - }); - response.exceptionHandler(testContext::failNow); - }); + RecordStoreProtocol.QueryRequest request = + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setSortBy( + RecordStoreProtocol.SortByRequest.newBuilder() + .setType(RecordStoreProtocol.SortByType.SORT_BY_NEWEST_VERSION_FIRST) + .build()) + .build(); + + recordServiceVertxStub.query( + request, + response -> { + List results = new ArrayList<>(); + response.handler( + req -> { + System.out.println("received a response"); + DemoUserProto.User p = null; + try { + p = DemoUserProto.User.parseFrom(req.getRecord()); + results.add(p); + } catch (InvalidProtocolBufferException e) { + testContext.failNow(e); + e.printStackTrace(); + } + }); + response.endHandler( + end -> { + assertEquals(0, results.size()); + testContext.completeNow(); + }); + response.exceptionHandler(testContext::failNow); + }); } @AfterAll diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTestUnauthorized.java b/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTestUnauthorized.java index 4c60c1b..ae2ae02 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTestUnauthorized.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/GrpcVerticleTestUnauthorized.java @@ -26,6 +26,9 @@ import io.vertx.grpc.VertxChannelBuilder; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; +import java.io.File; +import java.io.IOException; +import java.util.Collections; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -33,10 +36,6 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.testcontainers.containers.AbstractFDBContainer; -import java.io.File; -import java.io.IOException; -import java.util.Collections; - @ExtendWith(VertxExtension.class) 
@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class GrpcVerticleTestUnauthorized extends AbstractFDBContainer { @@ -47,25 +46,29 @@ public class GrpcVerticleTestUnauthorized extends AbstractFDBContainer { private File clusterFile; @BeforeAll - void deploy_verticle(Vertx vertx, VertxTestContext testContext) throws IOException, InterruptedException { + void deploy_verticle(Vertx vertx, VertxTestContext testContext) + throws IOException, InterruptedException { clusterFile = container.getClusterFile(); - DeploymentOptions options = new DeploymentOptions() - .setConfig(new JsonObject() - .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) - .put(Constants.CONFIG_GRPC_LISTEN_PORT, port)); + DeploymentOptions options = + new DeploymentOptions() + .setConfig( + new JsonObject() + .put(Constants.CONFIG_FDB_CLUSTER_FILE, clusterFile.getAbsolutePath()) + .put(Constants.CONFIG_GRPC_LISTEN_PORT, port)); BiscuitManager biscuitManager = new BiscuitManager(); String sealedBiscuit = biscuitManager.create(DEFAULT_TENANT, Collections.emptyList()); - BiscuitClientCredential credentials = new BiscuitClientCredential(DEFAULT_TENANT + "dsa", sealedBiscuit, this.getClass().getName()); + BiscuitClientCredential credentials = + new BiscuitClientCredential( + DEFAULT_TENANT + "dsa", sealedBiscuit, this.getClass().getName()); // deploy verticle - vertx.deployVerticle(new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); - ManagedChannel channel = VertxChannelBuilder - .forAddress(vertx, "localhost", port) - .usePlaintext(true) - .build(); + vertx.deployVerticle( + new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); + ManagedChannel channel = + VertxChannelBuilder.forAddress(vertx, "localhost", port).usePlaintext(true).build(); adminServiceVertxStub = AdminServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); } @@ -73,13 +76,15 @@ void deploy_verticle(Vertx vertx, VertxTestContext 
testContext) throws IOExcepti @Test public void testBadAuth(Vertx vertx, VertxTestContext testContext) throws Exception { - adminServiceVertxStub.ping(RecordStoreProtocol.EmptyRequest.newBuilder().build(), response -> { - if (response.succeeded()) { - testContext.failNow(response.cause()); - } else { - testContext.completeNow(); - } - }); + adminServiceVertxStub.ping( + RecordStoreProtocol.EmptyRequest.newBuilder().build(), + response -> { + if (response.succeeded()) { + testContext.failNow(response.cause()); + } else { + testContext.completeNow(); + } + }); } @AfterAll diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/PortManager.java b/record-store/src/test/java/fr/pierrezemb/recordstore/PortManager.java index 9e52e36..f65a02d 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/PortManager.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/PortManager.java @@ -26,7 +26,7 @@ public static synchronized int nextFreePort() { int port = nextPort++; try (ServerSocket ss = new ServerSocket(port)) { ss.close(); - //Give it some time to truly close the connection + // Give it some time to truly close the connection Thread.sleep(100); return port; } catch (Exception e) { diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/auth/BiscuitManagerTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/auth/BiscuitManagerTest.java index 42640dd..67d82de 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/auth/BiscuitManagerTest.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/auth/BiscuitManagerTest.java @@ -15,21 +15,21 @@ */ package fr.pierrezemb.recordstore.auth; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import com.clevercloud.biscuit.error.Error; import io.vavr.control.Either; +import java.util.Collections; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; -import 
java.util.Collections; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - @TestInstance(TestInstance.Lifecycle.PER_CLASS) class BiscuitManagerTest { - private static final String KEY = "3A8621F1847F19D6DAEAB5465CE8D3908B91C66FB9AF380D508FCF9253458907"; + private static final String KEY = + "3A8621F1847F19D6DAEAB5465CE8D3908B91C66FB9AF380D508FCF9253458907"; BiscuitManager biscuitManager; @BeforeAll diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGeneratorTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGeneratorTest.java index 2d84317..55e45fe 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGeneratorTest.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/graphql/GraphQLSchemaGeneratorTest.java @@ -15,36 +15,38 @@ */ package fr.pierrezemb.recordstore.graphql; +import static fr.pierrezemb.recordstore.datasets.DatasetsLoader.DEFAULT_DEMO_TENANT; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.apple.foundationdb.record.RecordMetaData; import com.google.protobuf.Descriptors; import com.google.protobuf.InvalidProtocolBufferException; import fr.pierrezemb.recordstore.Constants; import fr.pierrezemb.recordstore.datasets.DatasetsLoader; import fr.pierrezemb.recordstore.fdb.RecordLayer; +import java.io.File; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import javax.crypto.spec.SecretKeySpec; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.testcontainers.containers.AbstractFDBContainer; import org.testcontainers.shaded.com.google.common.collect.ImmutableList; -import javax.crypto.spec.SecretKeySpec; -import java.io.File; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -import static 
fr.pierrezemb.recordstore.datasets.DatasetsLoader.DEFAULT_DEMO_TENANT; -import static org.junit.jupiter.api.Assertions.assertTrue; - @TestInstance(TestInstance.Lifecycle.PER_CLASS) class GraphQLSchemaGeneratorTest extends AbstractFDBContainer { private File clusterFile; private RecordLayer recordLayer; @BeforeAll - void setUp() throws InterruptedException, ExecutionException, TimeoutException, InvalidProtocolBufferException, Descriptors.DescriptorValidationException { + void setUp() + throws InterruptedException, ExecutionException, TimeoutException, + InvalidProtocolBufferException, Descriptors.DescriptorValidationException { clusterFile = container.getClusterFile(); - SecretKeySpec secretKey = new SecretKeySpec(Constants.CONFIG_ENCRYPTION_KEY_DEFAULT.getBytes(), "AES"); + SecretKeySpec secretKey = + new SecretKeySpec(Constants.CONFIG_ENCRYPTION_KEY_DEFAULT.getBytes(), "AES"); recordLayer = new RecordLayer(clusterFile.getAbsolutePath(), false, secretKey); DatasetsLoader datasetsLoader = new DatasetsLoader(recordLayer); @@ -57,15 +59,15 @@ void generate() { String schema = GraphQLSchemaGenerator.generate(metadata); System.out.println(schema); - ImmutableList shouldContains = ImmutableList.of( - "type User", - "email: String", - "id: Long", - "type Query {", - "allUsers(limit: Int): [User!]!", - "getUserByEmail(email: String): User!", - "getUserByName(name: String): User!" 
- ); + ImmutableList shouldContains = + ImmutableList.of( + "type User", + "email: String", + "id: Long", + "type Query {", + "allUsers(limit: Int): [User!]!", + "getUserByEmail(email: String): User!", + "getUserByName(name: String): User!"); for (String shouldContain : shouldContains) { assertTrue(schema.contains(shouldContain), "schema does not contain '" + shouldContain + "'"); } diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/ManagedKVServiceTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/ManagedKVServiceTest.java new file mode 100644 index 0000000..20e2502 --- /dev/null +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/ManagedKVServiceTest.java @@ -0,0 +1,148 @@ +/** + * Copyright 2020 Pierre Zemb + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package fr.pierrezemb.recordstore.grpc; + +import static fr.pierrezemb.recordstore.GrpcVerticleTest.DEFAULT_TENANT; +import static org.junit.Assert.assertEquals; + +import com.google.protobuf.ByteString; +import fr.pierrezemb.recordstore.GrpcVerticle; +import fr.pierrezemb.recordstore.PortManager; +import fr.pierrezemb.recordstore.auth.BiscuitClientCredential; +import fr.pierrezemb.recordstore.auth.BiscuitManager; +import fr.pierrezemb.recordstore.proto.managed.kv.ManagedKVGrpc; +import fr.pierrezemb.recordstore.proto.managed.kv.ManagedKVProto; +import io.grpc.ManagedChannel; +import io.vertx.core.DeploymentOptions; +import io.vertx.core.Vertx; +import io.vertx.core.json.JsonObject; +import io.vertx.grpc.VertxChannelBuilder; +import io.vertx.junit5.VertxExtension; +import io.vertx.junit5.VertxTestContext; +import java.io.File; +import java.nio.charset.Charset; +import java.util.Collections; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.extension.ExtendWith; +import org.testcontainers.containers.AbstractFDBContainer; + +@ExtendWith(VertxExtension.class) +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class ManagedKVServiceTest extends AbstractFDBContainer { + + public final int port = PortManager.nextFreePort(); + private ManagedKVGrpc.ManagedKVVertxStub managedKVVertxStub; + private File clusterFile; + + @BeforeAll + void deploy_verticle(Vertx vertx, VertxTestContext testContext) { + + clusterFile = container.getClusterFile(); + + DeploymentOptions options = + new DeploymentOptions() + .setConfig( + new JsonObject() + .put("fdb-cluster-file", clusterFile.getAbsolutePath()) + .put("grpc-listen-port", port)); + + BiscuitManager biscuitManager = new BiscuitManager(); + 
String sealedBiscuit = biscuitManager.create(DEFAULT_TENANT, Collections.emptyList()); + BiscuitClientCredential credentials = + new BiscuitClientCredential(DEFAULT_TENANT, sealedBiscuit, this.getClass().getName()); + + // deploy verticle + vertx.deployVerticle( + new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); + ManagedChannel channel = + VertxChannelBuilder.forAddress(vertx, "localhost", port).usePlaintext(true).build(); + + managedKVVertxStub = ManagedKVGrpc.newVertxStub(channel).withCallCredentials(credentials); + } + + @Test + @Order(1) + public void testPut(Vertx vertx, VertxTestContext testContext) throws Exception { + + managedKVVertxStub.put( + ManagedKVProto.KeyValue.newBuilder() + .setKey(ByteString.copyFrom("b", Charset.defaultCharset())) + .setValue(ByteString.copyFrom("toto", Charset.defaultCharset())) + .build(), + response -> { + if (response.succeeded()) { + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); + } + + @Test + @Order(2) + public void testGet(Vertx vertx, VertxTestContext testContext) throws Exception { + managedKVVertxStub.scan( + ManagedKVProto.ScanRequest.newBuilder() + .setStartKey(ByteString.copyFrom("b", Charset.defaultCharset())) + .build(), + keyValueGrpcReadStream -> + keyValueGrpcReadStream.handler( + keyValue -> { + assertEquals(1, keyValue.getKey().size()); + assertEquals(4, keyValue.getValue().size()); + testContext.completeNow(); + })); + } + + @Test + @Order(3) + public void testScan(Vertx vertx, VertxTestContext testContext) throws Exception { + managedKVVertxStub.scan( + ManagedKVProto.ScanRequest.newBuilder() + .setStartKey(ByteString.copyFrom("a", Charset.defaultCharset())) + .setEndKey(ByteString.copyFrom("c", Charset.defaultCharset())) + .build(), + keyValueGrpcReadStream -> + keyValueGrpcReadStream.handler( + keyValue -> { + assertEquals(1, keyValue.getKey().size()); + assertEquals(4, keyValue.getValue().size()); + 
testContext.completeNow(); + })); + } + + @Test + @Order(4) + public void testDelete(Vertx vertx, VertxTestContext testContext) throws Exception { + managedKVVertxStub.delete( + ManagedKVProto.DeleteRequest.newBuilder() + .setKeyToDelete(ByteString.copyFrom("b", Charset.defaultCharset())) + .build(), + response -> { + if (response.succeeded()) { + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); + } +} diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/SchemaAdminServiceTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/SchemaAdminServiceTest.java index fd9e120..0c447b9 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/SchemaAdminServiceTest.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/grpc/SchemaAdminServiceTest.java @@ -15,6 +15,9 @@ */ package fr.pierrezemb.recordstore.grpc; +import static fr.pierrezemb.recordstore.GrpcVerticleTest.DEFAULT_CONTAINER; +import static fr.pierrezemb.recordstore.GrpcVerticleTest.DEFAULT_TENANT; + import com.google.protobuf.DescriptorProtos; import fr.pierrezemb.recordstore.GrpcVerticle; import fr.pierrezemb.recordstore.PortManager; @@ -32,6 +35,8 @@ import io.vertx.grpc.VertxChannelBuilder; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; +import java.io.File; +import java.util.Collections; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.RepeatedTest; @@ -40,12 +45,6 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.testcontainers.containers.AbstractFDBContainer; -import java.io.File; -import java.util.Collections; - -import static fr.pierrezemb.recordstore.GrpcVerticleTest.DEFAULT_CONTAINER; -import static fr.pierrezemb.recordstore.GrpcVerticleTest.DEFAULT_TENANT; - @ExtendWith(VertxExtension.class) @TestInstance(TestInstance.Lifecycle.PER_CLASS) public class SchemaAdminServiceTest extends AbstractFDBContainer { 
@@ -60,23 +59,26 @@ void deploy_verticle(Vertx vertx, VertxTestContext testContext) { clusterFile = container.getClusterFile(); - DeploymentOptions options = new DeploymentOptions() - .setConfig(new JsonObject() - .put("fdb-cluster-file", clusterFile.getAbsolutePath()) - .put("grpc-listen-port", port)); + DeploymentOptions options = + new DeploymentOptions() + .setConfig( + new JsonObject() + .put("fdb-cluster-file", clusterFile.getAbsolutePath()) + .put("grpc-listen-port", port)); BiscuitManager biscuitManager = new BiscuitManager(); String sealedBiscuit = biscuitManager.create(DEFAULT_TENANT, Collections.emptyList()); - BiscuitClientCredential credentials = new BiscuitClientCredential(DEFAULT_TENANT, sealedBiscuit, this.getClass().getName()); + BiscuitClientCredential credentials = + new BiscuitClientCredential(DEFAULT_TENANT, sealedBiscuit, this.getClass().getName()); // deploy verticle - vertx.deployVerticle(new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); - ManagedChannel channel = VertxChannelBuilder - .forAddress(vertx, "localhost", port) - .usePlaintext(true) - .build(); + vertx.deployVerticle( + new GrpcVerticle(), options, testContext.succeeding(id -> testContext.completeNow())); + ManagedChannel channel = + VertxChannelBuilder.forAddress(vertx, "localhost", port).usePlaintext(true).build(); - schemaServiceVertxStub = SchemaServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); + schemaServiceVertxStub = + SchemaServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); adminServiceVertxStub = AdminServiceGrpc.newVertxStub(channel).withCallCredentials(credentials); } @@ -84,147 +86,162 @@ void deploy_verticle(Vertx vertx, VertxTestContext testContext) { public void testCRUDSchema1(Vertx vertx, VertxTestContext testContext) throws Exception { DescriptorProtos.FileDescriptorSet dependencies = - ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); - - - 
RecordStoreProtocol.UpsertSchemaRequest request = RecordStoreProtocol.UpsertSchemaRequest - .newBuilder() - .addRecordTypeIndexDefinitions(RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName("User") - .addPrimaryKeyFields("id") - .build()) - .setSchema(dependencies) - .build(); - - schemaServiceVertxStub.upsert(request, response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); + + RecordStoreProtocol.UpsertSchemaRequest request = + RecordStoreProtocol.UpsertSchemaRequest.newBuilder() + .addRecordTypeIndexDefinitions( + RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() + .setName("User") + .addPrimaryKeyFields("id") + .build()) + .setSchema(dependencies) + .build(); + + schemaServiceVertxStub.upsert( + request, + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testCRUDSchema2(Vertx vertx, VertxTestContext testContext) throws Exception { - schemaServiceVertxStub.get(RecordStoreProtocol.GetSchemaRequest.newBuilder() - .setRecordTypeName("User") - .build(), response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + schemaServiceVertxStub.get( + RecordStoreProtocol.GetSchemaRequest.newBuilder().setRecordTypeName("User").build(), + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testCRUDSchema3(Vertx vertx, 
VertxTestContext testContext) throws Exception { DescriptorProtos.FileDescriptorSet dependencies = - ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); - - - RecordStoreProtocol.UpsertSchemaRequest request = RecordStoreProtocol.UpsertSchemaRequest - .newBuilder() - .addRecordTypeIndexDefinitions(RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName("User") - .addPrimaryKeyFields("id") - .addIndexDefinitions(RecordStoreProtocol.IndexDefinition.newBuilder() - .setField("name") - .setIndexType(RecordStoreProtocol.IndexType.VALUE) - .build()) - .build()) - .setSchema(dependencies) - .build(); - - schemaServiceVertxStub.upsert(request, response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); + + RecordStoreProtocol.UpsertSchemaRequest request = + RecordStoreProtocol.UpsertSchemaRequest.newBuilder() + .addRecordTypeIndexDefinitions( + RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() + .setName("User") + .addPrimaryKeyFields("id") + .addIndexDefinitions( + RecordStoreProtocol.IndexDefinition.newBuilder() + .setField("name") + .setIndexType(RecordStoreProtocol.IndexType.VALUE) + .build()) + .build()) + .setSchema(dependencies) + .build(); + + schemaServiceVertxStub.upsert( + request, + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testCRUDSchema4(Vertx vertx, VertxTestContext testContext) throws Exception { DescriptorProtos.FileDescriptorSet dependencies = - ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); - - RecordStoreProtocol.UpsertSchemaRequest request = 
RecordStoreProtocol.UpsertSchemaRequest - .newBuilder() - .addRecordTypeIndexDefinitions(RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName("User") - .addPrimaryKeyFields("id") - // let's forget an index, this is working as we cannot delete an Index for now - .build()) - .setSchema(dependencies) - .build(); - - schemaServiceVertxStub.upsert(request, response -> { - if (response.succeeded()) { - testContext.completeNow(); - } else { - testContext.failNow(new Throwable("should have failed")); - } - }); + ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); + + RecordStoreProtocol.UpsertSchemaRequest request = + RecordStoreProtocol.UpsertSchemaRequest.newBuilder() + .addRecordTypeIndexDefinitions( + RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() + .setName("User") + .addPrimaryKeyFields("id") + // let's forget an index, this is working as we cannot delete an Index for now + .build()) + .setSchema(dependencies) + .build(); + + schemaServiceVertxStub.upsert( + request, + response -> { + if (response.succeeded()) { + testContext.completeNow(); + } else { + testContext.failNow(new Throwable("should have failed")); + } + }); } @RepeatedTest(value = 3) public void testCRUDSchema5(Vertx vertx, VertxTestContext testContext) throws Exception { DescriptorProtos.FileDescriptorSet dependencies = - ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); + ProtobufReflectionUtil.protoFileDescriptorSet(DemoUserProto.User.getDescriptor()); // upsert old schema should be harmless - RecordStoreProtocol.UpsertSchemaRequest request = RecordStoreProtocol.UpsertSchemaRequest - .newBuilder() - .addRecordTypeIndexDefinitions(RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() - .setName("User") - .addPrimaryKeyFields("id") - .addIndexDefinitions(RecordStoreProtocol.IndexDefinition.newBuilder() - .setField("name").build()) - .build()) - .setSchema(dependencies) - .build(); - - 
schemaServiceVertxStub.upsert(request, response -> { - if (response.succeeded()) { - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + RecordStoreProtocol.UpsertSchemaRequest request = + RecordStoreProtocol.UpsertSchemaRequest.newBuilder() + .addRecordTypeIndexDefinitions( + RecordStoreProtocol.RecordTypeIndexDefinition.newBuilder() + .setName("User") + .addPrimaryKeyFields("id") + .addIndexDefinitions( + RecordStoreProtocol.IndexDefinition.newBuilder().setField("name").build()) + .build()) + .setSchema(dependencies) + .build(); + + schemaServiceVertxStub.upsert( + request, + response -> { + if (response.succeeded()) { + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testCRUDSchema6(Vertx vertx, VertxTestContext testContext) throws Exception { - adminServiceVertxStub.list(RecordStoreProtocol.ListContainerRequest.newBuilder().build(), response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + adminServiceVertxStub.list( + RecordStoreProtocol.ListContainerRequest.newBuilder().build(), + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @Test public void testCRUDSchema7(Vertx vertx, VertxTestContext testContext) throws Exception { - adminServiceVertxStub.delete(RecordStoreProtocol.DeleteContainerRequest.newBuilder() - .addContainers(DEFAULT_CONTAINER) - .build(), response -> { - if (response.succeeded()) { - System.out.println("Got the server response: " + response.result()); - testContext.completeNow(); - } else { - testContext.failNow(response.cause()); - } - }); + adminServiceVertxStub.delete( + RecordStoreProtocol.DeleteContainerRequest.newBuilder() + 
.addContainers(DEFAULT_CONTAINER) + .build(), + response -> { + if (response.succeeded()) { + System.out.println("Got the server response: " + response.result()); + testContext.completeNow(); + } else { + testContext.failNow(response.cause()); + } + }); } @AfterAll diff --git a/record-store/src/test/java/fr/pierrezemb/recordstore/query/GrpcQueryGeneratorTest.java b/record-store/src/test/java/fr/pierrezemb/recordstore/query/GrpcQueryGeneratorTest.java index a8f5fd3..72074ab 100644 --- a/record-store/src/test/java/fr/pierrezemb/recordstore/query/GrpcQueryGeneratorTest.java +++ b/record-store/src/test/java/fr/pierrezemb/recordstore/query/GrpcQueryGeneratorTest.java @@ -22,15 +22,6 @@ import fr.pierrezemb.recordstore.datasets.DatasetsLoader; import fr.pierrezemb.recordstore.fdb.RecordLayer; import fr.pierrezemb.recordstore.proto.RecordStoreProtocol; -import org.junit.Assert; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; -import org.testcontainers.containers.AbstractFDBContainer; - -import javax.crypto.spec.SecretKeySpec; import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -38,21 +29,32 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.stream.Stream; +import javax.crypto.spec.SecretKeySpec; +import org.junit.Assert; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.testcontainers.containers.AbstractFDBContainer; @TestInstance(TestInstance.Lifecycle.PER_CLASS) /** - * GrpcQueryGeneratorTest is using the ./record-store/src/main/proto/demo_person.proto - * protobuf to test some queries 
+ * GrpcQueryGeneratorTest is using the ./record-store/src/main/proto/demo_person.proto protobuf to + * test some queries */ class GrpcQueryGeneratorTest extends AbstractFDBContainer { private File clusterFile; private RecordLayer recordLayer; @BeforeAll - void beforeAll() throws IOException, InterruptedException, TimeoutException, ExecutionException, Descriptors.DescriptorValidationException { + void beforeAll() + throws IOException, InterruptedException, TimeoutException, ExecutionException, + Descriptors.DescriptorValidationException { clusterFile = container.getClusterFile(); - SecretKeySpec secretKey = new SecretKeySpec(Constants.CONFIG_ENCRYPTION_KEY_DEFAULT.getBytes(), "AES"); + SecretKeySpec secretKey = + new SecretKeySpec(Constants.CONFIG_ENCRYPTION_KEY_DEFAULT.getBytes(), "AES"); recordLayer = new RecordLayer(clusterFile.getAbsolutePath(), false, secretKey); DatasetsLoader datasetsLoader = new DatasetsLoader(recordLayer); @@ -64,7 +66,8 @@ void beforeAll() throws IOException, InterruptedException, TimeoutException, Exe public void testQuery(RecordStoreProtocol.QueryRequest request, int expectedResult) { RecordQuery query = GrpcQueryGenerator.generate(request); - List results = this.recordLayer.queryRecords(DatasetsLoader.DEFAULT_DEMO_TENANT, "USER", query); + List results = + this.recordLayer.queryRecords(DatasetsLoader.DEFAULT_DEMO_TENANT, "USER", query); if (expectedResult == -1) { Assert.assertTrue("empty results", results.size() > 0); } else { @@ -74,166 +77,228 @@ public void testQuery(RecordStoreProtocol.QueryRequest request, int expectedResu private Stream generateRequest() { return Stream.of( - // all records - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .build(), 100), - - // get on id - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - 
.setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id").setInt64Value(1) - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .build()) - .build()) - .build(), 1), - - // range - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setAndNode(RecordStoreProtocol.QueryFilterAndNode.newBuilder() - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(1) - .setOperation(RecordStoreProtocol.FilterOperation.GREATER_THAN_OR_EQUALS) - .build()) - .build()) - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setInt64Value(10) - .setField("id") - .setOperation(RecordStoreProtocol.FilterOperation.LESS_THAN_OR_EQUALS) - .build()) - .build()) - .build()) - .build()) - .build(), 10), - - // or - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setOrNode(RecordStoreProtocol.QueryFilterOrNode.newBuilder() - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("id") - .setInt64Value(1) - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .build()) - .build()) - .addNodes(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setInt64Value(10) - .setField("id") - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .build()) - .build()) - .build()) - .build()) - .build(), 2), - - // text index any - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - 
.setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("rick_and_morty_quotes") - .setOperation(RecordStoreProtocol.FilterOperation.TEXT_CONTAINS_ANY) - .addTokens("jerry") - .build()) - .build()) - .build(), -1), - - // text index all - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("rick_and_morty_quotes") - .setOperation(RecordStoreProtocol.FilterOperation.TEXT_CONTAINS_ALL) - .addAllTokens(Arrays.asList("MR MEESEEKS LOOK AT ME".toLowerCase().split(" "))) - .build()) - .build()) - .build(), -1), - - // query over a repeated field - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("beers") - .setIsFieldDefinedAsRepeated(true) - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .setStringValue("Trappistes Rochefort 10") - .build()) - .build()) - .build(), -1), - - // query over an indexed map with constraint on key and value - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setMapNode(RecordStoreProtocol.QueryFilterMapNode.newBuilder() - .setField("favorite_locations_from_tv") - .setKey(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setOperation(RecordStoreProtocol.FilterOperation.START_WITH) - .setStringValue("hitchhikers_guide") - .build()) - .setValue(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setStringValue("Eroticon VI") - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .build()) - .build()) - .build()) - .build(), -1), - - // query over an indexed map with constraint on value - 
Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setMapNode(RecordStoreProtocol.QueryFilterMapNode.newBuilder() - .setField("favorite_locations_from_tv") - .setValue(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setStringValue("Earth") - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .build()) - .build()) - .build()) - .build(), -1), - - // query over an indexed map with constraint on key - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setMapNode(RecordStoreProtocol.QueryFilterMapNode.newBuilder() - .setField("favorite_locations_from_tv") - .setKey(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setStringValue("hitch") - .setOperation(RecordStoreProtocol.FilterOperation.START_WITH) - .build()) - .build()) - .build()) - .build(), 100), - - // query over an indexed nested field - Arguments.of(RecordStoreProtocol.QueryRequest.newBuilder() - .setRecordTypeName("User") - .setFilter(RecordStoreProtocol.QueryFilterNode.newBuilder() - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("address") - .setOperation(RecordStoreProtocol.FilterOperation.MATCHES) - .setFieldNode(RecordStoreProtocol.QueryFilterFieldNode.newBuilder() - .setField("city") - .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) - .setStringValue("Antown") - .build()) - .build()) - .build()) - .build(), -1) - ); + // all records + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder().setRecordTypeName("User").build(), 100), + + // get on id + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(1) + 
.setOperation(RecordStoreProtocol.FilterOperation.EQUALS) + .build()) + .build()) + .build(), + 1), + + // range + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setAndNode( + RecordStoreProtocol.QueryFilterAndNode.newBuilder() + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(1) + .setOperation( + RecordStoreProtocol.FilterOperation + .GREATER_THAN_OR_EQUALS) + .build()) + .build()) + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setInt64Value(10) + .setField("id") + .setOperation( + RecordStoreProtocol.FilterOperation + .LESS_THAN_OR_EQUALS) + .build()) + .build()) + .build()) + .build()) + .build(), + 10), + + // or + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setOrNode( + RecordStoreProtocol.QueryFilterOrNode.newBuilder() + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("id") + .setInt64Value(1) + .setOperation( + RecordStoreProtocol.FilterOperation.EQUALS) + .build()) + .build()) + .addNodes( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setInt64Value(10) + .setField("id") + .setOperation( + RecordStoreProtocol.FilterOperation.EQUALS) + .build()) + .build()) + .build()) + .build()) + .build(), + 2), + + // text index any + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + 
.setField("rick_and_morty_quotes") + .setOperation(RecordStoreProtocol.FilterOperation.TEXT_CONTAINS_ANY) + .addTokens("jerry") + .build()) + .build()) + .build(), + -1), + + // text index all + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("rick_and_morty_quotes") + .setOperation(RecordStoreProtocol.FilterOperation.TEXT_CONTAINS_ALL) + .addAllTokens( + Arrays.asList( + "MR MEESEEKS LOOK AT ME".toLowerCase().split(" "))) + .build()) + .build()) + .build(), + -1), + + // query over a repeated field + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("beers") + .setIsFieldDefinedAsRepeated(true) + .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) + .setStringValue("Trappistes Rochefort 10") + .build()) + .build()) + .build(), + -1), + + // query over an indexed map with constraint on key and value + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setMapNode( + RecordStoreProtocol.QueryFilterMapNode.newBuilder() + .setField("favorite_locations_from_tv") + .setKey( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setOperation( + RecordStoreProtocol.FilterOperation.START_WITH) + .setStringValue("hitchhikers_guide") + .build()) + .setValue( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setStringValue("Eroticon VI") + .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) + .build()) + .build()) + .build()) + .build(), + -1), + + // query over an indexed map with constraint on value + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + 
.setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setMapNode( + RecordStoreProtocol.QueryFilterMapNode.newBuilder() + .setField("favorite_locations_from_tv") + .setValue( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setStringValue("Earth") + .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) + .build()) + .build()) + .build()) + .build(), + -1), + + // query over an indexed map with constraint on key + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setMapNode( + RecordStoreProtocol.QueryFilterMapNode.newBuilder() + .setField("favorite_locations_from_tv") + .setKey( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setStringValue("hitch") + .setOperation( + RecordStoreProtocol.FilterOperation.START_WITH) + .build()) + .build()) + .build()) + .build(), + 100), + + // query over an indexed nested field + Arguments.of( + RecordStoreProtocol.QueryRequest.newBuilder() + .setRecordTypeName("User") + .setFilter( + RecordStoreProtocol.QueryFilterNode.newBuilder() + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("address") + .setOperation(RecordStoreProtocol.FilterOperation.MATCHES) + .setFieldNode( + RecordStoreProtocol.QueryFilterFieldNode.newBuilder() + .setField("city") + .setOperation(RecordStoreProtocol.FilterOperation.EQUALS) + .setStringValue("Antown") + .build()) + .build()) + .build()) + .build(), + -1)); } } diff --git a/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/AbstractFDBContainer.java b/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/AbstractFDBContainer.java index 7d3cc84..8f6d329 100644 --- a/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/AbstractFDBContainer.java +++ 
b/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/AbstractFDBContainer.java @@ -16,8 +16,8 @@ package org.testcontainers.containers; /** - * Use the singleton pattern to have only one FDB started - * Each tests will use the multi-tenancy feature of the record-store + * Use the singleton pattern to have only one FDB started. Each test will use the multi-tenancy + * feature of the record-store */ public abstract class AbstractFDBContainer { public static final FoundationDBContainer container; diff --git a/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/FoundationDBContainer.java b/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/FoundationDBContainer.java index d23c63e..becf8c8 100644 --- a/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/FoundationDBContainer.java +++ b/testcontainers-foundationdb/src/main/java/org/testcontainers/containers/FoundationDBContainer.java @@ -16,13 +16,12 @@ package org.testcontainers.containers; import com.github.dockerjava.api.command.InspectContainerResponse; +import java.io.File; +import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.containers.wait.strategy.Wait; -import java.io.File; -import java.io.IOException; - public class FoundationDBContainer extends GenericContainer { public static final int FDB_PORT = 4500; private static final String FDB_VERSION = "6.2.19"; @@ -43,7 +42,8 @@ public FoundationDBContainer(String fdbVersion) { @Override protected void containerIsStarted(InspectContainerResponse containerInfo) { try { - Container.ExecResult initResult = execInContainer("fdbcli", "--exec", "configure new single memory"); + Container.ExecResult initResult = + execInContainer("fdbcli", "--exec", "configure new single memory"); String stdout = initResult.getStdout(); log.debug("init FDB stdout: " + stdout); int exitCode = initResult.getExitCode(); @@ -74,9 +74,8 @@ protected void 
containerIsStarted(InspectContainerResponse containerInfo) { } /** - * A hook that is executed after the container is stopped with {@link #stop()}. - * Warning! This hook won't be executed if the container is terminated during - * the JVM's shutdown hook or by Ryuk. + * A hook that is executed after the container is stopped with {@link #stop()}. Warning! This hook + * won't be executed if the container is terminated during the JVM's shutdown hook or by Ryuk. * * @param containerInfo */ @@ -89,5 +88,4 @@ protected void containerIsStopped(InspectContainerResponse containerInfo) { public File getClusterFile() { return clusterFile; } - }