diff --git a/Cargo.lock b/Cargo.lock index 8d0210c3f81..677f0e84593 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1502,7 +1502,6 @@ dependencies = [ "nohash-hasher", "num_enum 0.7.3", "once_cell", - "ordered-float", "platform-serialization", "platform-serialization-derive", "platform-value", @@ -3424,17 +3423,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "ordered-float" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" -dependencies = [ - "num-traits", - "rand", - "serde", -] - [[package]] name = "overload" version = "0.1.1" @@ -3888,7 +3876,6 @@ dependencies = [ "libc", "rand_chacha", "rand_core", - "serde", ] [[package]] @@ -3908,7 +3895,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", - "serde", ] [[package]] diff --git a/packages/dapi-grpc/clients/platform/v0/nodejs/platform_pbjs.js b/packages/dapi-grpc/clients/platform/v0/nodejs/platform_pbjs.js index 3e3651d0586..6cf6ca064c2 100644 --- a/packages/dapi-grpc/clients/platform/v0/nodejs/platform_pbjs.js +++ b/packages/dapi-grpc/clients/platform/v0/nodejs/platform_pbjs.js @@ -64451,315 +64451,6 @@ $root.org = (function() { return PersonalEncryptedNote; })(); - GetGroupActionsResponseV0.TransferEvent = (function() { - - /** - * Properties of a TransferEvent. 
- * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0 - * @interface ITransferEvent - * @property {Uint8Array|null} [recipientId] TransferEvent recipientId - * @property {string|null} [publicNote] TransferEvent publicNote - * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ISharedEncryptedNote|null} [sharedEncryptedNote] TransferEvent sharedEncryptedNote - * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IPersonalEncryptedNote|null} [personalEncryptedNote] TransferEvent personalEncryptedNote - * @property {number|Long|null} [amount] TransferEvent amount - */ - - /** - * Constructs a new TransferEvent. - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0 - * @classdesc Represents a TransferEvent. - * @implements ITransferEvent - * @constructor - * @param {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITransferEvent=} [properties] Properties to set - */ - function TransferEvent(properties) { - if (properties) - for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * TransferEvent recipientId. - * @member {Uint8Array} recipientId - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @instance - */ - TransferEvent.prototype.recipientId = $util.newBuffer([]); - - /** - * TransferEvent publicNote. - * @member {string} publicNote - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @instance - */ - TransferEvent.prototype.publicNote = ""; - - /** - * TransferEvent sharedEncryptedNote. 
- * @member {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ISharedEncryptedNote|null|undefined} sharedEncryptedNote - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @instance - */ - TransferEvent.prototype.sharedEncryptedNote = null; - - /** - * TransferEvent personalEncryptedNote. - * @member {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IPersonalEncryptedNote|null|undefined} personalEncryptedNote - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @instance - */ - TransferEvent.prototype.personalEncryptedNote = null; - - /** - * TransferEvent amount. - * @member {number|Long} amount - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @instance - */ - TransferEvent.prototype.amount = $util.Long ? $util.Long.fromBits(0,0,true) : 0; - - /** - * Creates a new TransferEvent instance using the specified properties. - * @function create - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITransferEvent=} [properties] Properties to set - * @returns {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} TransferEvent instance - */ - TransferEvent.create = function create(properties) { - return new TransferEvent(properties); - }; - - /** - * Encodes the specified TransferEvent message. Does not implicitly {@link org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.verify|verify} messages. 
- * @function encode - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITransferEvent} message TransferEvent message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - TransferEvent.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.recipientId != null && Object.hasOwnProperty.call(message, "recipientId")) - writer.uint32(/* id 1, wireType 2 =*/10).bytes(message.recipientId); - if (message.publicNote != null && Object.hasOwnProperty.call(message, "publicNote")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.publicNote); - if (message.sharedEncryptedNote != null && Object.hasOwnProperty.call(message, "sharedEncryptedNote")) - $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.encode(message.sharedEncryptedNote, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.personalEncryptedNote != null && Object.hasOwnProperty.call(message, "personalEncryptedNote")) - $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.encode(message.personalEncryptedNote, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.amount != null && Object.hasOwnProperty.call(message, "amount")) - writer.uint32(/* id 5, wireType 0 =*/40).uint64(message.amount); - return writer; - }; - - /** - * Encodes the specified TransferEvent message, length delimited. Does not implicitly {@link org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.verify|verify} messages. 
- * @function encodeDelimited - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITransferEvent} message TransferEvent message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - TransferEvent.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a TransferEvent message from the specified reader or buffer. - * @function decode - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} TransferEvent - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - TransferEvent.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent(); - while (reader.pos < end) { - var tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.recipientId = reader.bytes(); - break; - case 2: - message.publicNote = reader.string(); - break; - case 3: - message.sharedEncryptedNote = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.decode(reader, reader.uint32()); - break; - case 4: - message.personalEncryptedNote = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.decode(reader, reader.uint32()); - break; - case 5: - message.amount = reader.uint64(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a TransferEvent message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} TransferEvent - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - TransferEvent.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a TransferEvent message. 
- * @function verify - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - TransferEvent.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.recipientId != null && message.hasOwnProperty("recipientId")) - if (!(message.recipientId && typeof message.recipientId.length === "number" || $util.isString(message.recipientId))) - return "recipientId: buffer expected"; - if (message.publicNote != null && message.hasOwnProperty("publicNote")) - if (!$util.isString(message.publicNote)) - return "publicNote: string expected"; - if (message.sharedEncryptedNote != null && message.hasOwnProperty("sharedEncryptedNote")) { - var error = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.verify(message.sharedEncryptedNote); - if (error) - return "sharedEncryptedNote." + error; - } - if (message.personalEncryptedNote != null && message.hasOwnProperty("personalEncryptedNote")) { - var error = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.verify(message.personalEncryptedNote); - if (error) - return "personalEncryptedNote." + error; - } - if (message.amount != null && message.hasOwnProperty("amount")) - if (!$util.isInteger(message.amount) && !(message.amount && $util.isInteger(message.amount.low) && $util.isInteger(message.amount.high))) - return "amount: integer|Long expected"; - return null; - }; - - /** - * Creates a TransferEvent message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {Object.} object Plain object - * @returns {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} TransferEvent - */ - TransferEvent.fromObject = function fromObject(object) { - if (object instanceof $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent) - return object; - var message = new $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent(); - if (object.recipientId != null) - if (typeof object.recipientId === "string") - $util.base64.decode(object.recipientId, message.recipientId = $util.newBuffer($util.base64.length(object.recipientId)), 0); - else if (object.recipientId.length >= 0) - message.recipientId = object.recipientId; - if (object.publicNote != null) - message.publicNote = String(object.publicNote); - if (object.sharedEncryptedNote != null) { - if (typeof object.sharedEncryptedNote !== "object") - throw TypeError(".org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.sharedEncryptedNote: object expected"); - message.sharedEncryptedNote = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.fromObject(object.sharedEncryptedNote); - } - if (object.personalEncryptedNote != null) { - if (typeof object.personalEncryptedNote !== "object") - throw TypeError(".org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.personalEncryptedNote: object expected"); - message.personalEncryptedNote = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.fromObject(object.personalEncryptedNote); - } - if (object.amount != null) - if ($util.Long) - (message.amount = $util.Long.fromValue(object.amount)).unsigned = true; - else if 
(typeof object.amount === "string") - message.amount = parseInt(object.amount, 10); - else if (typeof object.amount === "number") - message.amount = object.amount; - else if (typeof object.amount === "object") - message.amount = new $util.LongBits(object.amount.low >>> 0, object.amount.high >>> 0).toNumber(true); - return message; - }; - - /** - * Creates a plain object from a TransferEvent message. Also converts values to other types if specified. - * @function toObject - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @static - * @param {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} message TransferEvent - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - TransferEvent.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.defaults) { - if (options.bytes === String) - object.recipientId = ""; - else { - object.recipientId = []; - if (options.bytes !== Array) - object.recipientId = $util.newBuffer(object.recipientId); - } - object.publicNote = ""; - object.sharedEncryptedNote = null; - object.personalEncryptedNote = null; - if ($util.Long) { - var long = new $util.Long(0, 0, true); - object.amount = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.amount = options.longs === String ? "0" : 0; - } - if (message.recipientId != null && message.hasOwnProperty("recipientId")) - object.recipientId = options.bytes === String ? $util.base64.encode(message.recipientId, 0, message.recipientId.length) : options.bytes === Array ? 
Array.prototype.slice.call(message.recipientId) : message.recipientId; - if (message.publicNote != null && message.hasOwnProperty("publicNote")) - object.publicNote = message.publicNote; - if (message.sharedEncryptedNote != null && message.hasOwnProperty("sharedEncryptedNote")) - object.sharedEncryptedNote = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.toObject(message.sharedEncryptedNote, options); - if (message.personalEncryptedNote != null && message.hasOwnProperty("personalEncryptedNote")) - object.personalEncryptedNote = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.toObject(message.personalEncryptedNote, options); - if (message.amount != null && message.hasOwnProperty("amount")) - if (typeof message.amount === "number") - object.amount = options.longs === String ? String(message.amount) : message.amount; - else - object.amount = options.longs === String ? $util.Long.prototype.toString.call(message.amount) : options.longs === Number ? new $util.LongBits(message.amount.low >>> 0, message.amount.high >>> 0).toNumber(true) : message.amount; - return object; - }; - - /** - * Converts this TransferEvent to JSON. 
- * @function toJSON - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent - * @instance - * @returns {Object.} JSON object - */ - TransferEvent.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - return TransferEvent; - })(); - GetGroupActionsResponseV0.EmergencyActionEvent = (function() { /** @@ -66320,7 +66011,6 @@ $root.org = (function() { * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IFreezeEvent|null} [freeze] TokenEvent freeze * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IUnfreezeEvent|null} [unfreeze] TokenEvent unfreeze * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IDestroyFrozenFundsEvent|null} [destroyFrozenFunds] TokenEvent destroyFrozenFunds - * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITransferEvent|null} [transfer] TokenEvent transfer * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IEmergencyActionEvent|null} [emergencyAction] TokenEvent emergencyAction * @property {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITokenConfigUpdateEvent|null} [tokenConfigUpdate] TokenEvent tokenConfigUpdate */ @@ -66380,14 +66070,6 @@ $root.org = (function() { */ TokenEvent.prototype.destroyFrozenFunds = null; - /** - * TokenEvent transfer. - * @member {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ITransferEvent|null|undefined} transfer - * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent - * @instance - */ - TokenEvent.prototype.transfer = null; - /** * TokenEvent emergencyAction. 
* @member {org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.IEmergencyActionEvent|null|undefined} emergencyAction @@ -66409,12 +66091,12 @@ $root.org = (function() { /** * TokenEvent type. - * @member {"mint"|"burn"|"freeze"|"unfreeze"|"destroyFrozenFunds"|"transfer"|"emergencyAction"|"tokenConfigUpdate"|undefined} type + * @member {"mint"|"burn"|"freeze"|"unfreeze"|"destroyFrozenFunds"|"emergencyAction"|"tokenConfigUpdate"|undefined} type * @memberof org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent * @instance */ Object.defineProperty(TokenEvent.prototype, "type", { - get: $util.oneOfGetter($oneOfFields = ["mint", "burn", "freeze", "unfreeze", "destroyFrozenFunds", "transfer", "emergencyAction", "tokenConfigUpdate"]), + get: $util.oneOfGetter($oneOfFields = ["mint", "burn", "freeze", "unfreeze", "destroyFrozenFunds", "emergencyAction", "tokenConfigUpdate"]), set: $util.oneOfSetter($oneOfFields) }); @@ -66452,12 +66134,10 @@ $root.org = (function() { $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEvent.encode(message.unfreeze, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.destroyFrozenFunds != null && Object.hasOwnProperty.call(message, "destroyFrozenFunds")) $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.encode(message.destroyFrozenFunds, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.transfer != null && Object.hasOwnProperty.call(message, "transfer")) - $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.encode(message.transfer, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); if (message.emergencyAction != null && Object.hasOwnProperty.call(message, "emergencyAction")) - 
$root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.encode(message.emergencyAction, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.encode(message.emergencyAction, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); if (message.tokenConfigUpdate != null && Object.hasOwnProperty.call(message, "tokenConfigUpdate")) - $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.encode(message.tokenConfigUpdate, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.encode(message.tokenConfigUpdate, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); return writer; }; @@ -66508,12 +66188,9 @@ $root.org = (function() { message.destroyFrozenFunds = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.decode(reader, reader.uint32()); break; case 6: - message.transfer = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.decode(reader, reader.uint32()); - break; - case 7: message.emergencyAction = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.decode(reader, reader.uint32()); break; - case 8: + case 7: message.tokenConfigUpdate = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.decode(reader, reader.uint32()); break; default: @@ -66600,16 +66277,6 @@ $root.org = (function() { return "destroyFrozenFunds." 
+ error; } } - if (message.transfer != null && message.hasOwnProperty("transfer")) { - if (properties.type === 1) - return "type: multiple values"; - properties.type = 1; - { - var error = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.verify(message.transfer); - if (error) - return "transfer." + error; - } - } if (message.emergencyAction != null && message.hasOwnProperty("emergencyAction")) { if (properties.type === 1) return "type: multiple values"; @@ -66670,11 +66337,6 @@ $root.org = (function() { throw TypeError(".org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.destroyFrozenFunds: object expected"); message.destroyFrozenFunds = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.fromObject(object.destroyFrozenFunds); } - if (object.transfer != null) { - if (typeof object.transfer !== "object") - throw TypeError(".org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.transfer: object expected"); - message.transfer = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.fromObject(object.transfer); - } if (object.emergencyAction != null) { if (typeof object.emergencyAction !== "object") throw TypeError(".org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.emergencyAction: object expected"); @@ -66726,11 +66388,6 @@ $root.org = (function() { if (options.oneofs) object.type = "destroyFrozenFunds"; } - if (message.transfer != null && message.hasOwnProperty("transfer")) { - object.transfer = $root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject(message.transfer, options); - if (options.oneofs) - object.type = "transfer"; - } if (message.emergencyAction != null && message.hasOwnProperty("emergencyAction")) { object.emergencyAction = 
$root.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.toObject(message.emergencyAction, options); if (options.oneofs) diff --git a/packages/dapi-grpc/clients/platform/v0/nodejs/platform_protoc.js b/packages/dapi-grpc/clients/platform/v0/nodejs/platform_protoc.js index 36356ff116d..db423372350 100644 --- a/packages/dapi-grpc/clients/platform/v0/nodejs/platform_protoc.js +++ b/packages/dapi-grpc/clients/platform/v0/nodejs/platform_protoc.js @@ -178,7 +178,6 @@ goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGr goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.TypeCase', null, { proto }); -goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.VersionCase', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupInfoRequest', null, { proto }); @@ -5963,27 +5962,6 @@ if (goog.DEBUG && !COMPILED) { */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.displayName = 'proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote'; } -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. 
The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.displayName = 'proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent'; -} /** * Generated by JsPbCodeGenerator. * @param {Array=} opt_data Optional initial data array, typically from a @@ -63042,340 +63020,6 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.toObject = function(opt_includeInstance) { - return proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject = function(includeInstance, msg) { - var f, obj = { - recipientId: msg.getRecipientId_asB64(), - publicNote: jspb.Message.getFieldWithDefault(msg, 2, ""), - sharedEncryptedNote: (f = msg.getSharedEncryptedNote()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.toObject(includeInstance, f), - personalEncryptedNote: (f = msg.getPersonalEncryptedNote()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.toObject(includeInstance, f), - amount: jspb.Message.getFieldWithDefault(msg, 5, 0) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. 
- * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent; - return proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {!Uint8Array} */ (reader.readBytes()); - msg.setRecipientId(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setPublicNote(value); - break; - case 3: - var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote; - reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.deserializeBinaryFromReader); - msg.setSharedEncryptedNote(value); - break; - case 4: - var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote; - 
reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.deserializeBinaryFromReader); - msg.setPersonalEncryptedNote(value); - break; - case 5: - var value = /** @type {number} */ (reader.readUint64()); - msg.setAmount(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getRecipientId_asU8(); - if (f.length > 0) { - writer.writeBytes( - 1, - f - ); - } - f = /** @type {string} */ (jspb.Message.getField(message, 2)); - if (f != null) { - writer.writeString( - 2, - f - ); - } - f = message.getSharedEncryptedNote(); - if (f != null) { - writer.writeMessage( - 3, - f, - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.serializeBinaryToWriter - ); - } - f = message.getPersonalEncryptedNote(); - if (f != null) { - writer.writeMessage( - 4, - f, - 
proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.serializeBinaryToWriter - ); - } - f = message.getAmount(); - if (f !== 0) { - writer.writeUint64( - 5, - f - ); - } -}; - - -/** - * optional bytes recipient_id = 1; - * @return {string} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getRecipientId = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * optional bytes recipient_id = 1; - * This is a type-conversion wrapper around `getRecipientId()` - * @return {string} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getRecipientId_asB64 = function() { - return /** @type {string} */ (jspb.Message.bytesAsB64( - this.getRecipientId())); -}; - - -/** - * optional bytes recipient_id = 1; - * Note that Uint8Array is not supported on all browsers. - * @see http://caniuse.com/Uint8Array - * This is a type-conversion wrapper around `getRecipientId()` - * @return {!Uint8Array} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getRecipientId_asU8 = function() { - return /** @type {!Uint8Array} */ (jspb.Message.bytesAsU8( - this.getRecipientId())); -}; - - -/** - * @param {!(string|Uint8Array)} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setRecipientId = function(value) { - return jspb.Message.setProto3BytesField(this, 1, value); -}; - - -/** - * optional string public_note = 2; - * @return {string} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getPublicNote = function() { - return /** @type {string} */ 
(jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setPublicNote = function(value) { - return jspb.Message.setField(this, 2, value); -}; - - -/** - * Clears the field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.clearPublicNote = function() { - return jspb.Message.setField(this, 2, undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.hasPublicNote = function() { - return jspb.Message.getField(this, 2) != null; -}; - - -/** - * optional SharedEncryptedNote shared_encrypted_note = 3; - * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getSharedEncryptedNote = function() { - return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote, 3)); -}; - - -/** - * @param {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote|undefined} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this -*/ 
-proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setSharedEncryptedNote = function(value) { - return jspb.Message.setWrapperField(this, 3, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.clearSharedEncryptedNote = function() { - return this.setSharedEncryptedNote(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.hasSharedEncryptedNote = function() { - return jspb.Message.getField(this, 3) != null; -}; - - -/** - * optional PersonalEncryptedNote personal_encrypted_note = 4; - * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getPersonalEncryptedNote = function() { - return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote, 4)); -}; - - -/** - * @param {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote|undefined} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this -*/ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setPersonalEncryptedNote = function(value) { - return jspb.Message.setWrapperField(this, 4, value); -}; - - -/** - * Clears 
the message field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.clearPersonalEncryptedNote = function() { - return this.setPersonalEncryptedNote(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.hasPersonalEncryptedNote = function() { - return jspb.Message.getField(this, 4) != null; -}; - - -/** - * optional uint64 amount = 5; - * @return {number} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getAmount = function() { - return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 5, 0)); -}; - - -/** - * @param {number} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setAmount = function(value) { - return jspb.Message.setProto3IntField(this, 5, value); -}; - - - - - if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. 
@@ -64710,7 +64354,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @private {!Array>} * @const */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_ = [[1,2,3,4,5,6,7,8]]; +proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_ = [[1,2,3,4,5,6,7]]; /** * @enum {number} @@ -64722,9 +64366,8 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV FREEZE: 3, UNFREEZE: 4, DESTROY_FROZEN_FUNDS: 5, - TRANSFER: 6, - EMERGENCY_ACTION: 7, - TOKEN_CONFIG_UPDATE: 8 + EMERGENCY_ACTION: 6, + TOKEN_CONFIG_UPDATE: 7 }; /** @@ -64770,7 +64413,6 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV freeze: (f = msg.getFreeze()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.FreezeEvent.toObject(includeInstance, f), unfreeze: (f = msg.getUnfreeze()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEvent.toObject(includeInstance, f), destroyFrozenFunds: (f = msg.getDestroyFrozenFunds()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.toObject(includeInstance, f), - transfer: (f = msg.getTransfer()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject(includeInstance, f), emergencyAction: (f = msg.getEmergencyAction()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.toObject(includeInstance, f), tokenConfigUpdate: (f = msg.getTokenConfigUpdate()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.toObject(includeInstance, f) }; @@ -64835,16 +64477,11 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV msg.setDestroyFrozenFunds(value); break; 
case 6: - var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent; - reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinaryFromReader); - msg.setTransfer(value); - break; - case 7: var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent; reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.deserializeBinaryFromReader); msg.setEmergencyAction(value); break; - case 8: + case 7: var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent; reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.deserializeBinaryFromReader); msg.setTokenConfigUpdate(value); @@ -64918,18 +64555,10 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.serializeBinaryToWriter ); } - f = message.getTransfer(); - if (f != null) { - writer.writeMessage( - 6, - f, - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.serializeBinaryToWriter - ); - } f = message.getEmergencyAction(); if (f != null) { writer.writeMessage( - 7, + 6, f, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.serializeBinaryToWriter ); @@ -64937,7 +64566,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV f = message.getTokenConfigUpdate(); if (f != null) { writer.writeMessage( - 8, + 7, f, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.serializeBinaryToWriter ); @@ -65131,49 +64760,12 @@ 
proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV /** - * optional TransferEvent transfer = 6; - * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.getTransfer = function() { - return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent, 6)); -}; - - -/** - * @param {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent|undefined} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this -*/ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.setTransfer = function(value) { - return jspb.Message.setOneofWrapperField(this, 6, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.clearTransfer = function() { - return this.setTransfer(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.hasTransfer = function() { - return jspb.Message.getField(this, 6) != null; -}; - - -/** - * optional EmergencyActionEvent emergency_action = 7; + * optional EmergencyActionEvent emergency_action = 6; * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.getEmergencyAction = function() { return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent, 7)); + jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent, 6)); }; @@ -65182,7 +64774,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.setEmergencyAction = function(value) { - return jspb.Message.setOneofWrapperField(this, 7, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); + return jspb.Message.setOneofWrapperField(this, 6, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); }; @@ -65200,17 +64792,17 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {boolean} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.hasEmergencyAction = function() { - return 
jspb.Message.getField(this, 7) != null; + return jspb.Message.getField(this, 6) != null; }; /** - * optional TokenConfigUpdateEvent token_config_update = 8; + * optional TokenConfigUpdateEvent token_config_update = 7; * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.getTokenConfigUpdate = function() { return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent, 8)); + jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent, 7)); }; @@ -65219,7 +64811,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.setTokenConfigUpdate = function(value) { - return jspb.Message.setOneofWrapperField(this, 8, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); + return jspb.Message.setOneofWrapperField(this, 7, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); }; @@ -65237,7 +64829,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {boolean} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.hasTokenConfigUpdate = function() { - return jspb.Message.getField(this, 8) != null; + return jspb.Message.getField(this, 7) != null; }; diff --git 
a/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.h b/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.h index 3ebaf8d2312..88b3f2576d5 100644 --- a/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.h +++ b/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.h @@ -99,11 +99,8 @@ CF_EXTERN_C_BEGIN @class GetGroupActionsResponse_GetGroupActionsResponseV0_GroupActionEvent; @class GetGroupActionsResponse_GetGroupActionsResponseV0_GroupActions; @class GetGroupActionsResponse_GetGroupActionsResponseV0_MintEvent; -@class GetGroupActionsResponse_GetGroupActionsResponseV0_PersonalEncryptedNote; -@class GetGroupActionsResponse_GetGroupActionsResponseV0_SharedEncryptedNote; @class GetGroupActionsResponse_GetGroupActionsResponseV0_TokenConfigUpdateEvent; @class GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent; -@class GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent; @class GetGroupActionsResponse_GetGroupActionsResponseV0_UnfreezeEvent; @class GetGroupInfoRequest_GetGroupInfoRequestV0; @class GetGroupInfoResponse_GetGroupInfoResponseV0; @@ -6762,44 +6759,6 @@ GPB_FINAL @interface GetGroupActionsResponse_GetGroupActionsResponseV0_PersonalE @end -#pragma mark - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent - -typedef GPB_ENUM(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber) { - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_RecipientId = 1, - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_PublicNote = 2, - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_SharedEncryptedNote = 3, - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_PersonalEncryptedNote = 4, - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_Amount = 5, -}; - -/** - * Transfer event - **/ -GPB_FINAL @interface 
GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent : GPBMessage - -/** Recipient identifier */ -@property(nonatomic, readwrite, copy, null_resettable) NSData *recipientId; - -/** Public note */ -@property(nonatomic, readwrite, copy, null_resettable) NSString *publicNote; -/** Test to see if @c publicNote has been set. */ -@property(nonatomic, readwrite) BOOL hasPublicNote; - -/** Shared encrypted note */ -@property(nonatomic, readwrite, strong, null_resettable) GetGroupActionsResponse_GetGroupActionsResponseV0_SharedEncryptedNote *sharedEncryptedNote; -/** Test to see if @c sharedEncryptedNote has been set. */ -@property(nonatomic, readwrite) BOOL hasSharedEncryptedNote; - -/** Personal encrypted note */ -@property(nonatomic, readwrite, strong, null_resettable) GetGroupActionsResponse_GetGroupActionsResponseV0_PersonalEncryptedNote *personalEncryptedNote; -/** Test to see if @c personalEncryptedNote has been set. */ -@property(nonatomic, readwrite) BOOL hasPersonalEncryptedNote; - -/** Amount transferred */ -@property(nonatomic, readwrite) uint64_t amount; - -@end - #pragma mark - GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent typedef GPB_ENUM(GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent_FieldNumber) { @@ -6974,9 +6933,8 @@ typedef GPB_ENUM(GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Fi GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_Freeze = 3, GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_Unfreeze = 4, GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_DestroyFrozenFunds = 5, - GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_Transfer = 6, - GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_EmergencyAction = 7, - GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_TokenConfigUpdate = 8, + 
GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_EmergencyAction = 6, + GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_TokenConfigUpdate = 7, }; typedef GPB_ENUM(GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase) { @@ -6986,9 +6944,8 @@ typedef GPB_ENUM(GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Ty GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_Freeze = 3, GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_Unfreeze = 4, GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_DestroyFrozenFunds = 5, - GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_Transfer = 6, - GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_EmergencyAction = 7, - GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_TokenConfigUpdate = 8, + GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_EmergencyAction = 6, + GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_Type_OneOfCase_TokenConfigUpdate = 7, }; /** @@ -7013,9 +6970,6 @@ GPB_FINAL @interface GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEven /** Destroy frozen funds */ @property(nonatomic, readwrite, strong, null_resettable) GetGroupActionsResponse_GetGroupActionsResponseV0_DestroyFrozenFundsEvent *destroyFrozenFunds; -/** Transfer event details */ -@property(nonatomic, readwrite, strong, null_resettable) GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent *transfer; - /** Emergency action details */ @property(nonatomic, readwrite, strong, null_resettable) GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent *emergencyAction; diff --git a/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.m b/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.m index f9375f49869..bc611b4ac7a 100644 --- 
a/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.m +++ b/packages/dapi-grpc/clients/platform/v0/objective-c/Platform.pbobjc.m @@ -128,11 +128,8 @@ GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_GroupActionEvent); GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_GroupActions); GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_MintEvent); -GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_PersonalEncryptedNote); -GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_SharedEncryptedNote); GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_TokenConfigUpdateEvent); GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent); -GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent); GPBObjCClassDeclaration(GetGroupActionsResponse_GetGroupActionsResponseV0_UnfreezeEvent); GPBObjCClassDeclaration(GetGroupInfoRequest); GPBObjCClassDeclaration(GetGroupInfoRequest_GetGroupInfoRequestV0); @@ -17586,96 +17583,6 @@ + (GPBDescriptor *)descriptor { @end -#pragma mark - GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent - -@implementation GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent - -@dynamic recipientId; -@dynamic hasPublicNote, publicNote; -@dynamic hasSharedEncryptedNote, sharedEncryptedNote; -@dynamic hasPersonalEncryptedNote, personalEncryptedNote; -@dynamic amount; - -typedef struct GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_ { - uint32_t _has_storage_[1]; - NSData *recipientId; - NSString *publicNote; - GetGroupActionsResponse_GetGroupActionsResponseV0_SharedEncryptedNote *sharedEncryptedNote; - GetGroupActionsResponse_GetGroupActionsResponseV0_PersonalEncryptedNote *personalEncryptedNote; - uint64_t amount; -} GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_; - -// 
This method is threadsafe because it is initially called -// in +initialize for each subclass. -+ (GPBDescriptor *)descriptor { - static GPBDescriptor *descriptor = nil; - if (!descriptor) { - static GPBMessageFieldDescription fields[] = { - { - .name = "recipientId", - .dataTypeSpecific.clazz = Nil, - .number = GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_RecipientId, - .hasIndex = 0, - .offset = (uint32_t)offsetof(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_, recipientId), - .flags = (GPBFieldFlags)(GPBFieldOptional | GPBFieldClearHasIvarOnZero), - .dataType = GPBDataTypeBytes, - }, - { - .name = "publicNote", - .dataTypeSpecific.clazz = Nil, - .number = GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_PublicNote, - .hasIndex = 1, - .offset = (uint32_t)offsetof(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_, publicNote), - .flags = GPBFieldOptional, - .dataType = GPBDataTypeString, - }, - { - .name = "sharedEncryptedNote", - .dataTypeSpecific.clazz = GPBObjCClass(GetGroupActionsResponse_GetGroupActionsResponseV0_SharedEncryptedNote), - .number = GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_SharedEncryptedNote, - .hasIndex = 2, - .offset = (uint32_t)offsetof(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_, sharedEncryptedNote), - .flags = GPBFieldOptional, - .dataType = GPBDataTypeMessage, - }, - { - .name = "personalEncryptedNote", - .dataTypeSpecific.clazz = GPBObjCClass(GetGroupActionsResponse_GetGroupActionsResponseV0_PersonalEncryptedNote), - .number = GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_PersonalEncryptedNote, - .hasIndex = 3, - .offset = (uint32_t)offsetof(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_, personalEncryptedNote), - .flags = GPBFieldOptional, - .dataType = GPBDataTypeMessage, - }, - { - .name = "amount", - 
.dataTypeSpecific.clazz = Nil, - .number = GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent_FieldNumber_Amount, - .hasIndex = 4, - .offset = (uint32_t)offsetof(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_, amount), - .flags = (GPBFieldFlags)(GPBFieldOptional | GPBFieldClearHasIvarOnZero), - .dataType = GPBDataTypeUInt64, - }, - }; - GPBDescriptor *localDescriptor = - [GPBDescriptor allocDescriptorForClass:[GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent class] - rootClass:[PlatformRoot class] - file:PlatformRoot_FileDescriptor() - fields:fields - fieldCount:(uint32_t)(sizeof(fields) / sizeof(GPBMessageFieldDescription)) - storageSize:sizeof(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent__storage_) - flags:(GPBDescriptorInitializationFlags)(GPBDescriptorInitializationFlag_UsesClassRefs | GPBDescriptorInitializationFlag_Proto3OptionalKnown)]; - [localDescriptor setupContainingMessageClass:GPBObjCClass(GetGroupActionsResponse_GetGroupActionsResponseV0)]; - #if defined(DEBUG) && DEBUG - NSAssert(descriptor == nil, @"Startup recursed!"); - #endif // DEBUG - descriptor = localDescriptor; - } - return descriptor; -} - -@end - #pragma mark - GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent @implementation GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent @@ -18135,7 +18042,6 @@ @implementation GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent @dynamic freeze; @dynamic unfreeze; @dynamic destroyFrozenFunds; -@dynamic transfer; @dynamic emergencyAction; @dynamic tokenConfigUpdate; @@ -18146,7 +18052,6 @@ @implementation GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent GetGroupActionsResponse_GetGroupActionsResponseV0_FreezeEvent *freeze; GetGroupActionsResponse_GetGroupActionsResponseV0_UnfreezeEvent *unfreeze; GetGroupActionsResponse_GetGroupActionsResponseV0_DestroyFrozenFundsEvent *destroyFrozenFunds; - 
GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent *transfer; GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent *emergencyAction; GetGroupActionsResponse_GetGroupActionsResponseV0_TokenConfigUpdateEvent *tokenConfigUpdate; } GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent__storage_; @@ -18202,15 +18107,6 @@ + (GPBDescriptor *)descriptor { .flags = GPBFieldOptional, .dataType = GPBDataTypeMessage, }, - { - .name = "transfer", - .dataTypeSpecific.clazz = GPBObjCClass(GetGroupActionsResponse_GetGroupActionsResponseV0_TransferEvent), - .number = GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent_FieldNumber_Transfer, - .hasIndex = -1, - .offset = (uint32_t)offsetof(GetGroupActionsResponse_GetGroupActionsResponseV0_TokenEvent__storage_, transfer), - .flags = GPBFieldOptional, - .dataType = GPBDataTypeMessage, - }, { .name = "emergencyAction", .dataTypeSpecific.clazz = GPBObjCClass(GetGroupActionsResponse_GetGroupActionsResponseV0_EmergencyActionEvent), diff --git a/packages/dapi-grpc/clients/platform/v0/python/platform_pb2.py b/packages/dapi-grpc/clients/platform/v0/python/platform_pb2.py index 9acd55ec163..2d51403b537 100644 --- a/packages/dapi-grpc/clients/platform/v0/python/platform_pb2.py +++ b/packages/dapi-grpc/clients/platform/v0/python/platform_pb2.py @@ -23,7 +23,7 @@ syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\x0eplatform.proto\x12\x19org.dash.platform.dapi.v0\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x81\x01\n\x05Proof\x12\x15\n\rgrovedb_proof\x18\x01 \x01(\x0c\x12\x13\n\x0bquorum_hash\x18\x02 \x01(\x0c\x12\x11\n\tsignature\x18\x03 \x01(\x0c\x12\r\n\x05round\x18\x04 \x01(\r\x12\x15\n\rblock_id_hash\x18\x05 \x01(\x0c\x12\x13\n\x0bquorum_type\x18\x06 \x01(\r\"\x98\x01\n\x10ResponseMetadata\x12\x12\n\x06height\x18\x01 \x01(\x04\x42\x02\x30\x01\x12 
\n\x18\x63ore_chain_locked_height\x18\x02 \x01(\r\x12\r\n\x05\x65poch\x18\x03 \x01(\r\x12\x13\n\x07time_ms\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x18\n\x10protocol_version\x18\x05 \x01(\r\x12\x10\n\x08\x63hain_id\x18\x06 \x01(\t\"L\n\x1dStateTransitionBroadcastError\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\";\n\x1f\x42roadcastStateTransitionRequest\x12\x18\n\x10state_transition\x18\x01 \x01(\x0c\"\"\n BroadcastStateTransitionResponse\"\xa4\x01\n\x12GetIdentityRequest\x12P\n\x02v0\x18\x01 \x01(\x0b\x32\x42.org.dash.platform.dapi.v0.GetIdentityRequest.GetIdentityRequestV0H\x00\x1a\x31\n\x14GetIdentityRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xc1\x01\n\x17GetIdentityNonceRequest\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetIdentityNonceRequest.GetIdentityNonceRequestV0H\x00\x1a?\n\x19GetIdentityNonceRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xf6\x01\n\x1fGetIdentityContractNonceRequest\x12j\n\x02v0\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetIdentityContractNonceRequest.GetIdentityContractNonceRequestV0H\x00\x1a\\\n!GetIdentityContractNonceRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x13\n\x0b\x63ontract_id\x18\x02 \x01(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xc0\x01\n\x19GetIdentityBalanceRequest\x12^\n\x02v0\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetIdentityBalanceRequest.GetIdentityBalanceRequestV0H\x00\x1a\x38\n\x1bGetIdentityBalanceRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xec\x01\n$GetIdentityBalanceAndRevisionRequest\x12t\n\x02v0\x18\x01 \x01(\x0b\x32\x66.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionRequest.GetIdentityBalanceAndRevisionRequestV0H\x00\x1a\x43\n&GetIdentityBalanceAndRevisionRequestV0\x12\n\n\x02id\x18\x01 
\x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x9e\x02\n\x13GetIdentityResponse\x12R\n\x02v0\x18\x01 \x01(\x0b\x32\x44.org.dash.platform.dapi.v0.GetIdentityResponse.GetIdentityResponseV0H\x00\x1a\xa7\x01\n\x15GetIdentityResponseV0\x12\x12\n\x08identity\x18\x01 \x01(\x0cH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xbc\x02\n\x18GetIdentityNonceResponse\x12\\\n\x02v0\x18\x01 \x01(\x0b\x32N.org.dash.platform.dapi.v0.GetIdentityNonceResponse.GetIdentityNonceResponseV0H\x00\x1a\xb6\x01\n\x1aGetIdentityNonceResponseV0\x12\x1c\n\x0eidentity_nonce\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xe5\x02\n GetIdentityContractNonceResponse\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetIdentityContractNonceResponse.GetIdentityContractNonceResponseV0H\x00\x1a\xc7\x01\n\"GetIdentityContractNonceResponseV0\x12%\n\x17identity_contract_nonce\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xbd\x02\n\x1aGetIdentityBalanceResponse\x12`\n\x02v0\x18\x01 \x01(\x0b\x32R.org.dash.platform.dapi.v0.GetIdentityBalanceResponse.GetIdentityBalanceResponseV0H\x00\x1a\xb1\x01\n\x1cGetIdentityBalanceResponseV0\x12\x15\n\x07\x62\x61lance\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xb1\x04\n%GetIdentityBalanceAndRevisionResponse\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionResponse.GetIdentityBalanceAndRevisionResponseV0H\x00\x1a\x84\x03\n\'GetIdentityBalanceAndRevisionResponseV0\x12\x9b\x01\n\x14\x62\x61lance_and_revision\x18\x01 \x01(\x0b\x32{.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionResponse.GetIdentityBalanceAndRevisionResponseV0.BalanceAndRevisionH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a?\n\x12\x42\x61lanceAndRevision\x12\x13\n\x07\x62\x61lance\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x14\n\x08revision\x18\x02 \x01(\x04\x42\x02\x30\x01\x42\x08\n\x06resultB\t\n\x07version\"\xd1\x01\n\x0eKeyRequestType\x12\x36\n\x08\x61ll_keys\x18\x01 \x01(\x0b\x32\".org.dash.platform.dapi.v0.AllKeysH\x00\x12@\n\rspecific_keys\x18\x02 \x01(\x0b\x32\'.org.dash.platform.dapi.v0.SpecificKeysH\x00\x12:\n\nsearch_key\x18\x03 \x01(\x0b\x32$.org.dash.platform.dapi.v0.SearchKeyH\x00\x42\t\n\x07request\"\t\n\x07\x41llKeys\"\x1f\n\x0cSpecificKeys\x12\x0f\n\x07key_ids\x18\x01 \x03(\r\"\xb6\x01\n\tSearchKey\x12I\n\x0bpurpose_map\x18\x01 \x03(\x0b\x32\x34.org.dash.platform.dapi.v0.SearchKey.PurposeMapEntry\x1a^\n\x0fPurposeMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12:\n\x05value\x18\x02 \x01(\x0b\x32+.org.dash.platform.dapi.v0.SecurityLevelMap:\x02\x38\x01\"\xbf\x02\n\x10SecurityLevelMap\x12]\n\x12security_level_map\x18\x01 \x03(\x0b\x32\x41.org.dash.platform.dapi.v0.SecurityLevelMap.SecurityLevelMapEntry\x1aw\n\x15SecurityLevelMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12M\n\x05value\x18\x02 
\x01(\x0e\x32>.org.dash.platform.dapi.v0.SecurityLevelMap.KeyKindRequestType:\x02\x38\x01\"S\n\x12KeyKindRequestType\x12\x1f\n\x1b\x43URRENT_KEY_OF_KIND_REQUEST\x10\x00\x12\x1c\n\x18\x41LL_KEYS_OF_KIND_REQUEST\x10\x01\"\xda\x02\n\x16GetIdentityKeysRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetIdentityKeysRequest.GetIdentityKeysRequestV0H\x00\x1a\xda\x01\n\x18GetIdentityKeysRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12?\n\x0crequest_type\x18\x02 \x01(\x0b\x32).org.dash.platform.dapi.v0.KeyRequestType\x12+\n\x05limit\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12,\n\x06offset\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\t\n\x07version\"\x99\x03\n\x17GetIdentityKeysResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetIdentityKeysResponse.GetIdentityKeysResponseV0H\x00\x1a\x96\x02\n\x19GetIdentityKeysResponseV0\x12\x61\n\x04keys\x18\x01 \x01(\x0b\x32Q.org.dash.platform.dapi.v0.GetIdentityKeysResponse.GetIdentityKeysResponseV0.KeysH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x1a\n\x04Keys\x12\x12\n\nkeys_bytes\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\xef\x02\n GetIdentitiesContractKeysRequest\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetIdentitiesContractKeysRequest.GetIdentitiesContractKeysRequestV0H\x00\x1a\xd1\x01\n\"GetIdentitiesContractKeysRequestV0\x12\x16\n\x0eidentities_ids\x18\x01 \x03(\x0c\x12\x13\n\x0b\x63ontract_id\x18\x02 \x01(\x0c\x12\x1f\n\x12\x64ocument_type_name\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x37\n\x08purposes\x18\x04 \x03(\x0e\x32%.org.dash.platform.dapi.v0.KeyPurpose\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\x15\n\x13_document_type_nameB\t\n\x07version\"\xdf\x06\n!GetIdentitiesContractKeysResponse\x12n\n\x02v0\x18\x01 
\x01(\x0b\x32`.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0H\x00\x1a\xbe\x05\n#GetIdentitiesContractKeysResponseV0\x12\x8a\x01\n\x0fidentities_keys\x18\x01 \x01(\x0b\x32o.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0.IdentitiesKeysH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aY\n\x0bPurposeKeys\x12\x36\n\x07purpose\x18\x01 \x01(\x0e\x32%.org.dash.platform.dapi.v0.KeyPurpose\x12\x12\n\nkeys_bytes\x18\x02 \x03(\x0c\x1a\x9f\x01\n\x0cIdentityKeys\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12z\n\x04keys\x18\x02 \x03(\x0b\x32l.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0.PurposeKeys\x1a\x90\x01\n\x0eIdentitiesKeys\x12~\n\x07\x65ntries\x18\x01 \x03(\x0b\x32m.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0.IdentityKeysB\x08\n\x06resultB\t\n\x07version\"\xa4\x02\n*GetEvonodesProposedEpochBlocksByIdsRequest\x12\x80\x01\n\x02v0\x18\x01 \x01(\x0b\x32r.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByIdsRequest.GetEvonodesProposedEpochBlocksByIdsRequestV0H\x00\x1ah\n,GetEvonodesProposedEpochBlocksByIdsRequestV0\x12\x12\n\x05\x65poch\x18\x01 \x01(\rH\x00\x88\x01\x01\x12\x0b\n\x03ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\x08\n\x06_epochB\t\n\x07version\"\x92\x06\n&GetEvonodesProposedEpochBlocksResponse\x12x\n\x02v0\x18\x01 \x01(\x0b\x32j.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse.GetEvonodesProposedEpochBlocksResponseV0H\x00\x1a\xe2\x04\n(GetEvonodesProposedEpochBlocksResponseV0\x12\xb1\x01\n#evonodes_proposed_block_counts_info\x18\x01 
\x01(\x0b\x32\x81\x01.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse.GetEvonodesProposedEpochBlocksResponseV0.EvonodesProposedBlocksH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a?\n\x15\x45vonodeProposedBlocks\x12\x13\n\x0bpro_tx_hash\x18\x01 \x01(\x0c\x12\x11\n\x05\x63ount\x18\x02 \x01(\x04\x42\x02\x30\x01\x1a\xc4\x01\n\x16\x45vonodesProposedBlocks\x12\xa9\x01\n\x1e\x65vonodes_proposed_block_counts\x18\x01 \x03(\x0b\x32\x80\x01.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse.GetEvonodesProposedEpochBlocksResponseV0.EvonodeProposedBlocksB\x08\n\x06resultB\t\n\x07version\"\xf2\x02\n,GetEvonodesProposedEpochBlocksByRangeRequest\x12\x84\x01\n\x02v0\x18\x01 \x01(\x0b\x32v.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByRangeRequest.GetEvonodesProposedEpochBlocksByRangeRequestV0H\x00\x1a\xaf\x01\n.GetEvonodesProposedEpochBlocksByRangeRequestV0\x12\x12\n\x05\x65poch\x18\x01 \x01(\rH\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x02 \x01(\rH\x02\x88\x01\x01\x12\x15\n\x0bstart_after\x18\x03 \x01(\x0cH\x00\x12\x12\n\x08start_at\x18\x04 \x01(\x0cH\x00\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\x07\n\x05startB\x08\n\x06_epochB\x08\n\x06_limitB\t\n\x07version\"\xcd\x01\n\x1cGetIdentitiesBalancesRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetIdentitiesBalancesRequest.GetIdentitiesBalancesRequestV0H\x00\x1a<\n\x1eGetIdentitiesBalancesRequestV0\x12\x0b\n\x03ids\x18\x01 \x03(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x9f\x05\n\x1dGetIdentitiesBalancesResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse.GetIdentitiesBalancesResponseV0H\x00\x1a\x8a\x04\n\x1fGetIdentitiesBalancesResponseV0\x12\x8a\x01\n\x13identities_balances\x18\x01 
\x01(\x0b\x32k.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse.GetIdentitiesBalancesResponseV0.IdentitiesBalancesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aL\n\x0fIdentityBalance\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x18\n\x07\x62\x61lance\x18\x02 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01\x42\n\n\x08_balance\x1a\x8f\x01\n\x12IdentitiesBalances\x12y\n\x07\x65ntries\x18\x01 \x03(\x0b\x32h.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse.GetIdentitiesBalancesResponseV0.IdentityBalanceB\x08\n\x06resultB\t\n\x07version\"\xdb\x0f\n\x10GetProofsRequest\x12L\n\x02v0\x18\x01 \x01(\x0b\x32>.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0H\x00\x1a\xed\x0e\n\x12GetProofsRequestV0\x12\x62\n\nidentities\x18\x01 \x03(\x0b\x32N.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityRequest\x12\x61\n\tcontracts\x18\x02 \x03(\x0b\x32N.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.ContractRequest\x12\x61\n\tdocuments\x18\x03 \x03(\x0b\x32N.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.DocumentRequest\x12_\n\x05votes\x18\x04 \x03(\x0b\x32P.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.VoteStatusRequest\x12{\n\x17identity_token_balances\x18\x05 \x03(\x0b\x32Z.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityTokenBalanceRequest\x12u\n\x14identity_token_infos\x18\x06 \x03(\x0b\x32W.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityTokenInfoRequest\x12i\n\x0etoken_statuses\x18\x07 \x03(\x0b\x32Q.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.TokenStatusRequest\x1a\xd5\x02\n\x0f\x44ocumentRequest\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x15\n\rdocument_type\x18\x02 \x01(\t\x12#\n\x1b\x64ocument_type_keeps_history\x18\x03 \x01(\x08\x12\x13\n\x0b\x64ocument_id\x18\x04 
\x01(\x0c\x12\x89\x01\n\x19\x64ocument_contested_status\x18\x05 \x01(\x0e\x32\x66.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.DocumentRequest.DocumentContestedStatus\"P\n\x17\x44ocumentContestedStatus\x12\x11\n\rNOT_CONTESTED\x10\x00\x12\x13\n\x0fMAYBE_CONTESTED\x10\x01\x12\r\n\tCONTESTED\x10\x02\x1a\xd1\x01\n\x0fIdentityRequest\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12i\n\x0crequest_type\x18\x02 \x01(\x0e\x32S.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityRequest.Type\">\n\x04Type\x12\x11\n\rFULL_IDENTITY\x10\x00\x12\x0b\n\x07\x42\x41LANCE\x10\x01\x12\x08\n\x04KEYS\x10\x02\x12\x0c\n\x08REVISION\x10\x03\x1a&\n\x0f\x43ontractRequest\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x1a\xe7\x02\n\x11VoteStatusRequest\x12\xa5\x01\n&contested_resource_vote_status_request\x18\x01 \x01(\x0b\x32s.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.VoteStatusRequest.ContestedResourceVoteStatusRequestH\x00\x1a\x99\x01\n\"ContestedResourceVoteStatusRequest\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x14\n\x0cindex_values\x18\x04 \x03(\x0c\x12\x18\n\x10voter_identifier\x18\x05 \x01(\x0c\x42\x0e\n\x0crequest_type\x1a\x44\n\x1bIdentityTokenBalanceRequest\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x13\n\x0bidentity_id\x18\x02 \x01(\x0c\x1a\x41\n\x18IdentityTokenInfoRequest\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x13\n\x0bidentity_id\x18\x02 \x01(\x0c\x1a&\n\x12TokenStatusRequest\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x42\t\n\x07version\"\x82\x02\n\x11GetProofsResponse\x12N\n\x02v0\x18\x01 \x01(\x0b\x32@.org.dash.platform.dapi.v0.GetProofsResponse.GetProofsResponseV0H\x00\x1a\x91\x01\n\x13GetProofsResponseV0\x12\x31\n\x05proof\x18\x01 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x02 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xb4\x01\n\x16GetDataContractRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetDataContractRequest.GetDataContractRequestV0H\x00\x1a\x35\n\x18GetDataContractRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xb3\x02\n\x17GetDataContractResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetDataContractResponse.GetDataContractResponseV0H\x00\x1a\xb0\x01\n\x19GetDataContractResponseV0\x12\x17\n\rdata_contract\x18\x01 \x01(\x0cH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xb9\x01\n\x17GetDataContractsRequest\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetDataContractsRequest.GetDataContractsRequestV0H\x00\x1a\x37\n\x19GetDataContractsRequestV0\x12\x0b\n\x03ids\x18\x01 \x03(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xcf\x04\n\x18GetDataContractsResponse\x12\\\n\x02v0\x18\x01 \x01(\x0b\x32N.org.dash.platform.dapi.v0.GetDataContractsResponse.GetDataContractsResponseV0H\x00\x1a[\n\x11\x44\x61taContractEntry\x12\x12\n\nidentifier\x18\x01 \x01(\x0c\x12\x32\n\rdata_contract\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x1au\n\rDataContracts\x12\x64\n\x15\x64\x61ta_contract_entries\x18\x01 \x03(\x0b\x32\x45.org.dash.platform.dapi.v0.GetDataContractsResponse.DataContractEntry\x1a\xf5\x01\n\x1aGetDataContractsResponseV0\x12[\n\x0e\x64\x61ta_contracts\x18\x01 \x01(\x0b\x32\x41.org.dash.platform.dapi.v0.GetDataContractsResponse.DataContractsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xc5\x02\n\x1dGetDataContractHistoryRequest\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetDataContractHistoryRequest.GetDataContractHistoryRequestV0H\x00\x1a\xb0\x01\n\x1fGetDataContractHistoryRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12+\n\x05limit\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12,\n\x06offset\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x17\n\x0bstart_at_ms\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\t\n\x07version\"\xb2\x05\n\x1eGetDataContractHistoryResponse\x12h\n\x02v0\x18\x01 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetDataContractHistoryResponse.GetDataContractHistoryResponseV0H\x00\x1a\x9a\x04\n GetDataContractHistoryResponseV0\x12\x8f\x01\n\x15\x64\x61ta_contract_history\x18\x01 \x01(\x0b\x32n.org.dash.platform.dapi.v0.GetDataContractHistoryResponse.GetDataContractHistoryResponseV0.DataContractHistoryH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a;\n\x18\x44\x61taContractHistoryEntry\x12\x10\n\x04\x64\x61te\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\r\n\x05value\x18\x02 \x01(\x0c\x1a\xaa\x01\n\x13\x44\x61taContractHistory\x12\x92\x01\n\x15\x64\x61ta_contract_entries\x18\x01 \x03(\x0b\x32s.org.dash.platform.dapi.v0.GetDataContractHistoryResponse.GetDataContractHistoryResponseV0.DataContractHistoryEntryB\x08\n\x06resultB\t\n\x07version\"\xb2\x02\n\x13GetDocumentsRequest\x12R\n\x02v0\x18\x01 \x01(\x0b\x32\x44.org.dash.platform.dapi.v0.GetDocumentsRequest.GetDocumentsRequestV0H\x00\x1a\xbb\x01\n\x15GetDocumentsRequestV0\x12\x18\n\x10\x64\x61ta_contract_id\x18\x01 \x01(\x0c\x12\x15\n\rdocument_type\x18\x02 \x01(\t\x12\r\n\x05where\x18\x03 \x01(\x0c\x12\x10\n\x08order_by\x18\x04 \x01(\x0c\x12\r\n\x05limit\x18\x05 \x01(\r\x12\x15\n\x0bstart_after\x18\x06 
\x01(\x0cH\x00\x12\x12\n\x08start_at\x18\x07 \x01(\x0cH\x00\x12\r\n\x05prove\x18\x08 \x01(\x08\x42\x07\n\x05startB\t\n\x07version\"\x95\x03\n\x14GetDocumentsResponse\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetDocumentsResponse.GetDocumentsResponseV0H\x00\x1a\x9b\x02\n\x16GetDocumentsResponseV0\x12\x65\n\tdocuments\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetDocumentsResponse.GetDocumentsResponseV0.DocumentsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x1e\n\tDocuments\x12\x11\n\tdocuments\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\xed\x01\n!GetIdentityByPublicKeyHashRequest\x12n\n\x02v0\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashRequest.GetIdentityByPublicKeyHashRequestV0H\x00\x1aM\n#GetIdentityByPublicKeyHashRequestV0\x12\x17\n\x0fpublic_key_hash\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xda\x02\n\"GetIdentityByPublicKeyHashResponse\x12p\n\x02v0\x18\x01 \x01(\x0b\x32\x62.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashResponse.GetIdentityByPublicKeyHashResponseV0H\x00\x1a\xb6\x01\n$GetIdentityByPublicKeyHashResponseV0\x12\x12\n\x08identity\x18\x01 \x01(\x0cH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xfb\x01\n#WaitForStateTransitionResultRequest\x12r\n\x02v0\x18\x01 \x01(\x0b\x32\x64.org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest.WaitForStateTransitionResultRequestV0H\x00\x1aU\n%WaitForStateTransitionResultRequestV0\x12\x1d\n\x15state_transition_hash\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x99\x03\n$WaitForStateTransitionResultResponse\x12t\n\x02v0\x18\x01 
\x01(\x0b\x32\x66.org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse.WaitForStateTransitionResultResponseV0H\x00\x1a\xef\x01\n&WaitForStateTransitionResultResponseV0\x12I\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x38.org.dash.platform.dapi.v0.StateTransitionBroadcastErrorH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xc4\x01\n\x19GetConsensusParamsRequest\x12^\n\x02v0\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetConsensusParamsRequest.GetConsensusParamsRequestV0H\x00\x1a<\n\x1bGetConsensusParamsRequestV0\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x9c\x04\n\x1aGetConsensusParamsResponse\x12`\n\x02v0\x18\x01 \x01(\x0b\x32R.org.dash.platform.dapi.v0.GetConsensusParamsResponse.GetConsensusParamsResponseV0H\x00\x1aP\n\x14\x43onsensusParamsBlock\x12\x11\n\tmax_bytes\x18\x01 \x01(\t\x12\x0f\n\x07max_gas\x18\x02 \x01(\t\x12\x14\n\x0ctime_iota_ms\x18\x03 \x01(\t\x1a\x62\n\x17\x43onsensusParamsEvidence\x12\x1a\n\x12max_age_num_blocks\x18\x01 \x01(\t\x12\x18\n\x10max_age_duration\x18\x02 \x01(\t\x12\x11\n\tmax_bytes\x18\x03 \x01(\t\x1a\xda\x01\n\x1cGetConsensusParamsResponseV0\x12Y\n\x05\x62lock\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetConsensusParamsResponse.ConsensusParamsBlock\x12_\n\x08\x65vidence\x18\x02 \x01(\x0b\x32M.org.dash.platform.dapi.v0.GetConsensusParamsResponse.ConsensusParamsEvidenceB\t\n\x07version\"\xe4\x01\n%GetProtocolVersionUpgradeStateRequest\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateRequest.GetProtocolVersionUpgradeStateRequestV0H\x00\x1a\x38\n\'GetProtocolVersionUpgradeStateRequestV0\x12\r\n\x05prove\x18\x01 \x01(\x08\x42\t\n\x07version\"\xb5\x05\n&GetProtocolVersionUpgradeStateResponse\x12x\n\x02v0\x18\x01 
\x01(\x0b\x32j.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse.GetProtocolVersionUpgradeStateResponseV0H\x00\x1a\x85\x04\n(GetProtocolVersionUpgradeStateResponseV0\x12\x87\x01\n\x08versions\x18\x01 \x01(\x0b\x32s.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse.GetProtocolVersionUpgradeStateResponseV0.VersionsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x96\x01\n\x08Versions\x12\x89\x01\n\x08versions\x18\x01 \x03(\x0b\x32w.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse.GetProtocolVersionUpgradeStateResponseV0.VersionEntry\x1a:\n\x0cVersionEntry\x12\x16\n\x0eversion_number\x18\x01 \x01(\r\x12\x12\n\nvote_count\x18\x02 \x01(\rB\x08\n\x06resultB\t\n\x07version\"\xa3\x02\n*GetProtocolVersionUpgradeVoteStatusRequest\x12\x80\x01\n\x02v0\x18\x01 \x01(\x0b\x32r.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusRequest.GetProtocolVersionUpgradeVoteStatusRequestV0H\x00\x1ag\n,GetProtocolVersionUpgradeVoteStatusRequestV0\x12\x19\n\x11start_pro_tx_hash\x18\x01 \x01(\x0c\x12\r\n\x05\x63ount\x18\x02 \x01(\r\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xef\x05\n+GetProtocolVersionUpgradeVoteStatusResponse\x12\x82\x01\n\x02v0\x18\x01 \x01(\x0b\x32t.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse.GetProtocolVersionUpgradeVoteStatusResponseV0H\x00\x1a\xaf\x04\n-GetProtocolVersionUpgradeVoteStatusResponseV0\x12\x98\x01\n\x08versions\x18\x01 \x01(\x0b\x32\x83\x01.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse.GetProtocolVersionUpgradeVoteStatusResponseV0.VersionSignalsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\xaf\x01\n\x0eVersionSignals\x12\x9c\x01\n\x0fversion_signals\x18\x01 
\x03(\x0b\x32\x82\x01.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse.GetProtocolVersionUpgradeVoteStatusResponseV0.VersionSignal\x1a\x35\n\rVersionSignal\x12\x13\n\x0bpro_tx_hash\x18\x01 \x01(\x0c\x12\x0f\n\x07version\x18\x02 \x01(\rB\x08\n\x06resultB\t\n\x07version\"\xf5\x01\n\x14GetEpochsInfoRequest\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetEpochsInfoRequest.GetEpochsInfoRequestV0H\x00\x1a|\n\x16GetEpochsInfoRequestV0\x12\x31\n\x0bstart_epoch\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\r\n\x05\x63ount\x18\x02 \x01(\r\x12\x11\n\tascending\x18\x03 \x01(\x08\x12\r\n\x05prove\x18\x04 \x01(\x08\x42\t\n\x07version\"\x99\x05\n\x15GetEpochsInfoResponse\x12V\n\x02v0\x18\x01 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetEpochsInfoResponse.GetEpochsInfoResponseV0H\x00\x1a\x9c\x04\n\x17GetEpochsInfoResponseV0\x12\x65\n\x06\x65pochs\x18\x01 \x01(\x0b\x32S.org.dash.platform.dapi.v0.GetEpochsInfoResponse.GetEpochsInfoResponseV0.EpochInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1au\n\nEpochInfos\x12g\n\x0b\x65poch_infos\x18\x01 \x03(\x0b\x32R.org.dash.platform.dapi.v0.GetEpochsInfoResponse.GetEpochsInfoResponseV0.EpochInfo\x1a\xa6\x01\n\tEpochInfo\x12\x0e\n\x06number\x18\x01 \x01(\r\x12\x1e\n\x12\x66irst_block_height\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x1f\n\x17\x66irst_core_block_height\x18\x03 \x01(\r\x12\x16\n\nstart_time\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x16\n\x0e\x66\x65\x65_multiplier\x18\x05 \x01(\x01\x12\x18\n\x10protocol_version\x18\x06 \x01(\rB\x08\n\x06resultB\t\n\x07version\"\xde\x04\n\x1cGetContestedResourcesRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetContestedResourcesRequest.GetContestedResourcesRequestV0H\x00\x1a\xcc\x03\n\x1eGetContestedResourcesRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 
\x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x1a\n\x12start_index_values\x18\x04 \x03(\x0c\x12\x18\n\x10\x65nd_index_values\x18\x05 \x03(\x0c\x12\x89\x01\n\x13start_at_value_info\x18\x06 \x01(\x0b\x32g.org.dash.platform.dapi.v0.GetContestedResourcesRequest.GetContestedResourcesRequestV0.StartAtValueInfoH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x07 \x01(\rH\x01\x88\x01\x01\x12\x17\n\x0forder_ascending\x18\x08 \x01(\x08\x12\r\n\x05prove\x18\t \x01(\x08\x1a\x45\n\x10StartAtValueInfo\x12\x13\n\x0bstart_value\x18\x01 \x01(\x0c\x12\x1c\n\x14start_value_included\x18\x02 \x01(\x08\x42\x16\n\x14_start_at_value_infoB\x08\n\x06_countB\t\n\x07version\"\x88\x04\n\x1dGetContestedResourcesResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetContestedResourcesResponse.GetContestedResourcesResponseV0H\x00\x1a\xf3\x02\n\x1fGetContestedResourcesResponseV0\x12\x95\x01\n\x19\x63ontested_resource_values\x18\x01 \x01(\x0b\x32p.org.dash.platform.dapi.v0.GetContestedResourcesResponse.GetContestedResourcesResponseV0.ContestedResourceValuesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a<\n\x17\x43ontestedResourceValues\x12!\n\x19\x63ontested_resource_values\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\xd2\x05\n\x1cGetVotePollsByEndDateRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest.GetVotePollsByEndDateRequestV0H\x00\x1a\xc0\x04\n\x1eGetVotePollsByEndDateRequestV0\x12\x84\x01\n\x0fstart_time_info\x18\x01 \x01(\x0b\x32\x66.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest.GetVotePollsByEndDateRequestV0.StartAtTimeInfoH\x00\x88\x01\x01\x12\x80\x01\n\rend_time_info\x18\x02 
\x01(\x0b\x32\x64.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest.GetVotePollsByEndDateRequestV0.EndAtTimeInfoH\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x03 \x01(\rH\x02\x88\x01\x01\x12\x13\n\x06offset\x18\x04 \x01(\rH\x03\x88\x01\x01\x12\x11\n\tascending\x18\x05 \x01(\x08\x12\r\n\x05prove\x18\x06 \x01(\x08\x1aI\n\x0fStartAtTimeInfo\x12\x19\n\rstart_time_ms\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x1b\n\x13start_time_included\x18\x02 \x01(\x08\x1a\x43\n\rEndAtTimeInfo\x12\x17\n\x0b\x65nd_time_ms\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x19\n\x11\x65nd_time_included\x18\x02 \x01(\x08\x42\x12\n\x10_start_time_infoB\x10\n\x0e_end_time_infoB\x08\n\x06_limitB\t\n\x07_offsetB\t\n\x07version\"\x83\x06\n\x1dGetVotePollsByEndDateResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse.GetVotePollsByEndDateResponseV0H\x00\x1a\xee\x04\n\x1fGetVotePollsByEndDateResponseV0\x12\x9c\x01\n\x18vote_polls_by_timestamps\x18\x01 \x01(\x0b\x32x.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse.GetVotePollsByEndDateResponseV0.SerializedVotePollsByTimestampsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aV\n\x1eSerializedVotePollsByTimestamp\x12\x15\n\ttimestamp\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x1d\n\x15serialized_vote_polls\x18\x02 \x03(\x0c\x1a\xd7\x01\n\x1fSerializedVotePollsByTimestamps\x12\x99\x01\n\x18vote_polls_by_timestamps\x18\x01 \x03(\x0b\x32w.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse.GetVotePollsByEndDateResponseV0.SerializedVotePollsByTimestamp\x12\x18\n\x10\x66inished_results\x18\x02 \x01(\x08\x42\x08\n\x06resultB\t\n\x07version\"\xff\x06\n$GetContestedResourceVoteStateRequest\x12t\n\x02v0\x18\x01 
\x01(\x0b\x32\x66.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest.GetContestedResourceVoteStateRequestV0H\x00\x1a\xd5\x05\n&GetContestedResourceVoteStateRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x14\n\x0cindex_values\x18\x04 \x03(\x0c\x12\x86\x01\n\x0bresult_type\x18\x05 \x01(\x0e\x32q.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest.GetContestedResourceVoteStateRequestV0.ResultType\x12\x36\n.allow_include_locked_and_abstaining_vote_tally\x18\x06 \x01(\x08\x12\xa3\x01\n\x18start_at_identifier_info\x18\x07 \x01(\x0b\x32|.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest.GetContestedResourceVoteStateRequestV0.StartAtIdentifierInfoH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x08 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\t \x01(\x08\x1aT\n\x15StartAtIdentifierInfo\x12\x18\n\x10start_identifier\x18\x01 \x01(\x0c\x12!\n\x19start_identifier_included\x18\x02 \x01(\x08\"I\n\nResultType\x12\r\n\tDOCUMENTS\x10\x00\x12\x0e\n\nVOTE_TALLY\x10\x01\x12\x1c\n\x18\x44OCUMENTS_AND_VOTE_TALLY\x10\x02\x42\x1b\n\x19_start_at_identifier_infoB\x08\n\x06_countB\t\n\x07version\"\x94\x0c\n%GetContestedResourceVoteStateResponse\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0H\x00\x1a\xe7\n\n\'GetContestedResourceVoteStateResponseV0\x12\xae\x01\n\x1d\x63ontested_resource_contenders\x18\x01 \x01(\x0b\x32\x84\x01.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.ContestedResourceContendersH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\xda\x03\n\x10\x46inishedVoteInfo\x12\xad\x01\n\x15\x66inished_vote_outcome\x18\x01 
\x01(\x0e\x32\x8d\x01.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.FinishedVoteInfo.FinishedVoteOutcome\x12\x1f\n\x12won_by_identity_id\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x12$\n\x18\x66inished_at_block_height\x18\x03 \x01(\x04\x42\x02\x30\x01\x12%\n\x1d\x66inished_at_core_block_height\x18\x04 \x01(\r\x12%\n\x19\x66inished_at_block_time_ms\x18\x05 \x01(\x04\x42\x02\x30\x01\x12\x19\n\x11\x66inished_at_epoch\x18\x06 \x01(\r\"O\n\x13\x46inishedVoteOutcome\x12\x14\n\x10TOWARDS_IDENTITY\x10\x00\x12\n\n\x06LOCKED\x10\x01\x12\x16\n\x12NO_PREVIOUS_WINNER\x10\x02\x42\x15\n\x13_won_by_identity_id\x1a\xc4\x03\n\x1b\x43ontestedResourceContenders\x12\x86\x01\n\ncontenders\x18\x01 \x03(\x0b\x32r.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.Contender\x12\x1f\n\x12\x61\x62stain_vote_tally\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x1c\n\x0flock_vote_tally\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\x9a\x01\n\x12\x66inished_vote_info\x18\x04 \x01(\x0b\x32y.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.FinishedVoteInfoH\x02\x88\x01\x01\x42\x15\n\x13_abstain_vote_tallyB\x12\n\x10_lock_vote_tallyB\x15\n\x13_finished_vote_info\x1ak\n\tContender\x12\x12\n\nidentifier\x18\x01 \x01(\x0c\x12\x17\n\nvote_count\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x15\n\x08\x64ocument\x18\x03 \x01(\x0cH\x01\x88\x01\x01\x42\r\n\x0b_vote_countB\x0b\n\t_documentB\x08\n\x06resultB\t\n\x07version\"\xd5\x05\n,GetContestedResourceVotersForIdentityRequest\x12\x84\x01\n\x02v0\x18\x01 \x01(\x0b\x32v.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityRequest.GetContestedResourceVotersForIdentityRequestV0H\x00\x1a\x92\x04\n.GetContestedResourceVotersForIdentityRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x14\n\x0cindex_values\x18\x04 
\x03(\x0c\x12\x15\n\rcontestant_id\x18\x05 \x01(\x0c\x12\xb4\x01\n\x18start_at_identifier_info\x18\x06 \x01(\x0b\x32\x8c\x01.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityRequest.GetContestedResourceVotersForIdentityRequestV0.StartAtIdentifierInfoH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x07 \x01(\rH\x01\x88\x01\x01\x12\x17\n\x0forder_ascending\x18\x08 \x01(\x08\x12\r\n\x05prove\x18\t \x01(\x08\x1aT\n\x15StartAtIdentifierInfo\x12\x18\n\x10start_identifier\x18\x01 \x01(\x0c\x12!\n\x19start_identifier_included\x18\x02 \x01(\x08\x42\x1b\n\x19_start_at_identifier_infoB\x08\n\x06_countB\t\n\x07version\"\xf1\x04\n-GetContestedResourceVotersForIdentityResponse\x12\x86\x01\n\x02v0\x18\x01 \x01(\x0b\x32x.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityResponse.GetContestedResourceVotersForIdentityResponseV0H\x00\x1a\xab\x03\n/GetContestedResourceVotersForIdentityResponseV0\x12\xb6\x01\n\x19\x63ontested_resource_voters\x18\x01 \x01(\x0b\x32\x90\x01.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityResponse.GetContestedResourceVotersForIdentityResponseV0.ContestedResourceVotersH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x43\n\x17\x43ontestedResourceVoters\x12\x0e\n\x06voters\x18\x01 \x03(\x0c\x12\x18\n\x10\x66inished_results\x18\x02 \x01(\x08\x42\x08\n\x06resultB\t\n\x07version\"\xad\x05\n(GetContestedResourceIdentityVotesRequest\x12|\n\x02v0\x18\x01 \x01(\x0b\x32n.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesRequest.GetContestedResourceIdentityVotesRequestV0H\x00\x1a\xf7\x03\n*GetContestedResourceIdentityVotesRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12+\n\x05limit\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12,\n\x06offset\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x17\n\x0forder_ascending\x18\x04 
\x01(\x08\x12\xae\x01\n\x1astart_at_vote_poll_id_info\x18\x05 \x01(\x0b\x32\x84\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesRequest.GetContestedResourceIdentityVotesRequestV0.StartAtVotePollIdInfoH\x00\x88\x01\x01\x12\r\n\x05prove\x18\x06 \x01(\x08\x1a\x61\n\x15StartAtVotePollIdInfo\x12 \n\x18start_at_poll_identifier\x18\x01 \x01(\x0c\x12&\n\x1estart_poll_identifier_included\x18\x02 \x01(\x08\x42\x1d\n\x1b_start_at_vote_poll_id_infoB\t\n\x07version\"\xc8\n\n)GetContestedResourceIdentityVotesResponse\x12~\n\x02v0\x18\x01 \x01(\x0b\x32p.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0H\x00\x1a\x8f\t\n+GetContestedResourceIdentityVotesResponseV0\x12\xa1\x01\n\x05votes\x18\x01 \x01(\x0b\x32\x8f\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ContestedResourceIdentityVotesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\xf7\x01\n\x1e\x43ontestedResourceIdentityVotes\x12\xba\x01\n!contested_resource_identity_votes\x18\x01 \x03(\x0b\x32\x8e\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ContestedResourceIdentityVote\x12\x18\n\x10\x66inished_results\x18\x02 \x01(\x08\x1a\xad\x02\n\x12ResourceVoteChoice\x12\xad\x01\n\x10vote_choice_type\x18\x01 \x01(\x0e\x32\x92\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ResourceVoteChoice.VoteChoiceType\x12\x18\n\x0bidentity_id\x18\x02 \x01(\x0cH\x00\x88\x01\x01\"=\n\x0eVoteChoiceType\x12\x14\n\x10TOWARDS_IDENTITY\x10\x00\x12\x0b\n\x07\x41\x42STAIN\x10\x01\x12\x08\n\x04LOCK\x10\x02\x42\x0e\n\x0c_identity_id\x1a\x95\x02\n\x1d\x43ontestedResourceIdentityVote\x12\x13\n\x0b\x63ontract_id\x18\x01 
\x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\'\n\x1fserialized_index_storage_values\x18\x03 \x03(\x0c\x12\x99\x01\n\x0bvote_choice\x18\x04 \x01(\x0b\x32\x83\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ResourceVoteChoiceB\x08\n\x06resultB\t\n\x07version\"\xf0\x01\n%GetPrefundedSpecializedBalanceRequest\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceRequest.GetPrefundedSpecializedBalanceRequestV0H\x00\x1a\x44\n\'GetPrefundedSpecializedBalanceRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xed\x02\n&GetPrefundedSpecializedBalanceResponse\x12x\n\x02v0\x18\x01 \x01(\x0b\x32j.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceResponse.GetPrefundedSpecializedBalanceResponseV0H\x00\x1a\xbd\x01\n(GetPrefundedSpecializedBalanceResponseV0\x12\x15\n\x07\x62\x61lance\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xd0\x01\n GetTotalCreditsInPlatformRequest\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformRequest.GetTotalCreditsInPlatformRequestV0H\x00\x1a\x33\n\"GetTotalCreditsInPlatformRequestV0\x12\r\n\x05prove\x18\x01 \x01(\x08\x42\t\n\x07version\"\xd9\x02\n!GetTotalCreditsInPlatformResponse\x12n\n\x02v0\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformResponse.GetTotalCreditsInPlatformResponseV0H\x00\x1a\xb8\x01\n#GetTotalCreditsInPlatformResponseV0\x12\x15\n\x07\x63redits\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xc4\x01\n\x16GetPathElementsRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetPathElementsRequest.GetPathElementsRequestV0H\x00\x1a\x45\n\x18GetPathElementsRequestV0\x12\x0c\n\x04path\x18\x01 \x03(\x0c\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xa3\x03\n\x17GetPathElementsResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetPathElementsResponse.GetPathElementsResponseV0H\x00\x1a\xa0\x02\n\x19GetPathElementsResponseV0\x12i\n\x08\x65lements\x18\x01 \x01(\x0b\x32U.org.dash.platform.dapi.v0.GetPathElementsResponse.GetPathElementsResponseV0.ElementsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x1c\n\x08\x45lements\x12\x10\n\x08\x65lements\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\x81\x01\n\x10GetStatusRequest\x12L\n\x02v0\x18\x01 \x01(\x0b\x32>.org.dash.platform.dapi.v0.GetStatusRequest.GetStatusRequestV0H\x00\x1a\x14\n\x12GetStatusRequestV0B\t\n\x07version\"\xd0\x10\n\x11GetStatusResponse\x12N\n\x02v0\x18\x01 \x01(\x0b\x32@.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0H\x00\x1a\xdf\x0f\n\x13GetStatusResponseV0\x12Y\n\x07version\x18\x01 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version\x12S\n\x04node\x18\x02 \x01(\x0b\x32\x45.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Node\x12U\n\x05\x63hain\x18\x03 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Chain\x12Y\n\x07network\x18\x04 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Network\x12^\n\nstate_sync\x18\x05 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.StateSync\x12S\n\x04time\x18\x06 
\x01(\x0b\x32\x45.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Time\x1a\xee\x04\n\x07Version\x12\x63\n\x08software\x18\x01 \x01(\x0b\x32Q.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Software\x12\x63\n\x08protocol\x18\x02 \x01(\x0b\x32Q.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Protocol\x1a^\n\x08Software\x12\x0c\n\x04\x64\x61pi\x18\x01 \x01(\t\x12\x12\n\x05\x64rive\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x17\n\ntenderdash\x18\x03 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_driveB\r\n\x0b_tenderdash\x1a\xb8\x02\n\x08Protocol\x12p\n\ntenderdash\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Protocol.Tenderdash\x12\x66\n\x05\x64rive\x18\x02 \x01(\x0b\x32W.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Protocol.Drive\x1a(\n\nTenderdash\x12\x0b\n\x03p2p\x18\x01 \x01(\r\x12\r\n\x05\x62lock\x18\x02 \x01(\r\x1a(\n\x05\x44rive\x12\x0e\n\x06latest\x18\x03 \x01(\r\x12\x0f\n\x07\x63urrent\x18\x04 \x01(\r\x1a\x7f\n\x04Time\x12\x11\n\x05local\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x16\n\x05\x62lock\x18\x02 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01\x12\x18\n\x07genesis\x18\x03 \x01(\x04\x42\x02\x30\x01H\x01\x88\x01\x01\x12\x12\n\x05\x65poch\x18\x04 \x01(\rH\x02\x88\x01\x01\x42\x08\n\x06_blockB\n\n\x08_genesisB\x08\n\x06_epoch\x1a<\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\x18\n\x0bpro_tx_hash\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x42\x0e\n\x0c_pro_tx_hash\x1a\xb3\x02\n\x05\x43hain\x12\x13\n\x0b\x63\x61tching_up\x18\x01 \x01(\x08\x12\x19\n\x11latest_block_hash\x18\x02 \x01(\x0c\x12\x17\n\x0flatest_app_hash\x18\x03 \x01(\x0c\x12\x1f\n\x13latest_block_height\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x1b\n\x13\x65\x61rliest_block_hash\x18\x05 \x01(\x0c\x12\x19\n\x11\x65\x61rliest_app_hash\x18\x06 \x01(\x0c\x12!\n\x15\x65\x61rliest_block_height\x18\x07 \x01(\x04\x42\x02\x30\x01\x12!\n\x15max_peer_block_height\x18\t 
\x01(\x04\x42\x02\x30\x01\x12%\n\x18\x63ore_chain_locked_height\x18\n \x01(\rH\x00\x88\x01\x01\x42\x1b\n\x19_core_chain_locked_height\x1a\x43\n\x07Network\x12\x10\n\x08\x63hain_id\x18\x01 \x01(\t\x12\x13\n\x0bpeers_count\x18\x02 \x01(\r\x12\x11\n\tlistening\x18\x03 \x01(\x08\x1a\x85\x02\n\tStateSync\x12\x1d\n\x11total_synced_time\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x1a\n\x0eremaining_time\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x17\n\x0ftotal_snapshots\x18\x03 \x01(\r\x12\"\n\x16\x63hunk_process_avg_time\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x1b\n\x0fsnapshot_height\x18\x05 \x01(\x04\x42\x02\x30\x01\x12!\n\x15snapshot_chunks_count\x18\x06 \x01(\x04\x42\x02\x30\x01\x12\x1d\n\x11\x62\x61\x63kfilled_blocks\x18\x07 \x01(\x04\x42\x02\x30\x01\x12!\n\x15\x62\x61\x63kfill_blocks_total\x18\x08 \x01(\x04\x42\x02\x30\x01\x42\t\n\x07version\"\xb1\x01\n\x1cGetCurrentQuorumsInfoRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoRequest.GetCurrentQuorumsInfoRequestV0H\x00\x1a \n\x1eGetCurrentQuorumsInfoRequestV0B\t\n\x07version\"\xa1\x05\n\x1dGetCurrentQuorumsInfoResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse.GetCurrentQuorumsInfoResponseV0H\x00\x1a\x46\n\x0bValidatorV0\x12\x13\n\x0bpro_tx_hash\x18\x01 \x01(\x0c\x12\x0f\n\x07node_ip\x18\x02 \x01(\t\x12\x11\n\tis_banned\x18\x03 \x01(\x08\x1a\xaf\x01\n\x0eValidatorSetV0\x12\x13\n\x0bquorum_hash\x18\x01 \x01(\x0c\x12\x13\n\x0b\x63ore_height\x18\x02 \x01(\r\x12U\n\x07members\x18\x03 \x03(\x0b\x32\x44.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse.ValidatorV0\x12\x1c\n\x14threshold_public_key\x18\x04 \x01(\x0c\x1a\x92\x02\n\x1fGetCurrentQuorumsInfoResponseV0\x12\x15\n\rquorum_hashes\x18\x01 \x03(\x0c\x12\x1b\n\x13\x63urrent_quorum_hash\x18\x02 \x01(\x0c\x12_\n\x0evalidator_sets\x18\x03 \x03(\x0b\x32G.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse.ValidatorSetV0\x12\x1b\n\x13last_block_proposer\x18\x04 
\x01(\x0c\x12=\n\x08metadata\x18\x05 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\t\n\x07version\"\xf4\x01\n\x1fGetIdentityTokenBalancesRequest\x12j\n\x02v0\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetIdentityTokenBalancesRequest.GetIdentityTokenBalancesRequestV0H\x00\x1aZ\n!GetIdentityTokenBalancesRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xad\x05\n GetIdentityTokenBalancesResponse\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse.GetIdentityTokenBalancesResponseV0H\x00\x1a\x8f\x04\n\"GetIdentityTokenBalancesResponseV0\x12\x86\x01\n\x0etoken_balances\x18\x01 \x01(\x0b\x32l.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse.GetIdentityTokenBalancesResponseV0.TokenBalancesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aG\n\x11TokenBalanceEntry\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x14\n\x07\x62\x61lance\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_balance\x1a\x9a\x01\n\rTokenBalances\x12\x88\x01\n\x0etoken_balances\x18\x01 \x03(\x0b\x32p.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse.GetIdentityTokenBalancesResponseV0.TokenBalanceEntryB\x08\n\x06resultB\t\n\x07version\"\xfc\x01\n!GetIdentitiesTokenBalancesRequest\x12n\n\x02v0\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesRequest.GetIdentitiesTokenBalancesRequestV0H\x00\x1a\\\n#GetIdentitiesTokenBalancesRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x14\n\x0cidentity_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xf2\x05\n\"GetIdentitiesTokenBalancesResponse\x12p\n\x02v0\x18\x01 
\x01(\x0b\x32\x62.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse.GetIdentitiesTokenBalancesResponseV0H\x00\x1a\xce\x04\n$GetIdentitiesTokenBalancesResponseV0\x12\x9b\x01\n\x17identity_token_balances\x18\x01 \x01(\x0b\x32x.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse.GetIdentitiesTokenBalancesResponseV0.IdentityTokenBalancesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aR\n\x19IdentityTokenBalanceEntry\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x14\n\x07\x62\x61lance\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_balance\x1a\xb7\x01\n\x15IdentityTokenBalances\x12\x9d\x01\n\x17identity_token_balances\x18\x01 \x03(\x0b\x32|.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse.GetIdentitiesTokenBalancesResponseV0.IdentityTokenBalanceEntryB\x08\n\x06resultB\t\n\x07version\"\xe8\x01\n\x1cGetIdentityTokenInfosRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetIdentityTokenInfosRequest.GetIdentityTokenInfosRequestV0H\x00\x1aW\n\x1eGetIdentityTokenInfosRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\x98\x06\n\x1dGetIdentityTokenInfosResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0H\x00\x1a\x83\x05\n\x1fGetIdentityTokenInfosResponseV0\x12z\n\x0btoken_infos\x18\x01 \x01(\x0b\x32\x63.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0.TokenInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a(\n\x16TokenIdentityInfoEntry\x12\x0e\n\x06\x66rozen\x18\x01 \x01(\x08\x1a\xb0\x01\n\x0eTokenInfoEntry\x12\x10\n\x08token_id\x18\x01 
\x01(\x0c\x12\x82\x01\n\x04info\x18\x02 \x01(\x0b\x32o.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0.TokenIdentityInfoEntryH\x00\x88\x01\x01\x42\x07\n\x05_info\x1a\x8a\x01\n\nTokenInfos\x12|\n\x0btoken_infos\x18\x01 \x03(\x0b\x32g.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0.TokenInfoEntryB\x08\n\x06resultB\t\n\x07version\"\xf0\x01\n\x1eGetIdentitiesTokenInfosRequest\x12h\n\x02v0\x18\x01 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosRequest.GetIdentitiesTokenInfosRequestV0H\x00\x1aY\n GetIdentitiesTokenInfosRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x14\n\x0cidentity_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xca\x06\n\x1fGetIdentitiesTokenInfosResponse\x12j\n\x02v0\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0H\x00\x1a\xaf\x05\n!GetIdentitiesTokenInfosResponseV0\x12\x8f\x01\n\x14identity_token_infos\x18\x01 \x01(\x0b\x32o.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0.IdentityTokenInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a(\n\x16TokenIdentityInfoEntry\x12\x0e\n\x06\x66rozen\x18\x01 \x01(\x08\x1a\xb7\x01\n\x0eTokenInfoEntry\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x86\x01\n\x04info\x18\x02 \x01(\x0b\x32s.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0.TokenIdentityInfoEntryH\x00\x88\x01\x01\x42\x07\n\x05_info\x1a\x97\x01\n\x12IdentityTokenInfos\x12\x80\x01\n\x0btoken_infos\x18\x01 \x03(\x0b\x32k.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0.TokenInfoEntryB\x08\n\x06resultB\t\n\x07version\"\xbf\x01\n\x17GetTokenStatusesRequest\x12Z\n\x02v0\x18\x01 
\x01(\x0b\x32L.org.dash.platform.dapi.v0.GetTokenStatusesRequest.GetTokenStatusesRequestV0H\x00\x1a=\n\x19GetTokenStatusesRequestV0\x12\x11\n\ttoken_ids\x18\x01 \x03(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xe7\x04\n\x18GetTokenStatusesResponse\x12\\\n\x02v0\x18\x01 \x01(\x0b\x32N.org.dash.platform.dapi.v0.GetTokenStatusesResponse.GetTokenStatusesResponseV0H\x00\x1a\xe1\x03\n\x1aGetTokenStatusesResponseV0\x12v\n\x0etoken_statuses\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetTokenStatusesResponse.GetTokenStatusesResponseV0.TokenStatusesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x44\n\x10TokenStatusEntry\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x13\n\x06paused\x18\x02 \x01(\x08H\x00\x88\x01\x01\x42\t\n\x07_paused\x1a\x88\x01\n\rTokenStatuses\x12w\n\x0etoken_statuses\x18\x01 \x03(\x0b\x32_.org.dash.platform.dapi.v0.GetTokenStatusesResponse.GetTokenStatusesResponseV0.TokenStatusEntryB\x08\n\x06resultB\t\n\x07version\"\xef\x04\n)GetTokenPreProgrammedDistributionsRequest\x12~\n\x02v0\x18\x01 \x01(\x0b\x32p.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsRequest.GetTokenPreProgrammedDistributionsRequestV0H\x00\x1a\xb6\x03\n+GetTokenPreProgrammedDistributionsRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x98\x01\n\rstart_at_info\x18\x02 \x01(\x0b\x32|.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsRequest.GetTokenPreProgrammedDistributionsRequestV0.StartAtInfoH\x00\x88\x01\x01\x12\x12\n\x05limit\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\x04 \x01(\x08\x1a\x9a\x01\n\x0bStartAtInfo\x12\x15\n\rstart_time_ms\x18\x01 \x01(\x04\x12\x1c\n\x0fstart_recipient\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x12%\n\x18start_recipient_included\x18\x03 
\x01(\x08H\x01\x88\x01\x01\x42\x12\n\x10_start_recipientB\x1b\n\x19_start_recipient_includedB\x10\n\x0e_start_at_infoB\x08\n\x06_limitB\t\n\x07version\"\xec\x07\n*GetTokenPreProgrammedDistributionsResponse\x12\x80\x01\n\x02v0\x18\x01 \x01(\x0b\x32r.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0H\x00\x1a\xaf\x06\n,GetTokenPreProgrammedDistributionsResponseV0\x12\xa5\x01\n\x13token_distributions\x18\x01 \x01(\x0b\x32\x85\x01.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0.TokenDistributionsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a>\n\x16TokenDistributionEntry\x12\x14\n\x0crecipient_id\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x04\x1a\xd4\x01\n\x1bTokenTimedDistributionEntry\x12\x11\n\ttimestamp\x18\x01 \x01(\x04\x12\xa1\x01\n\rdistributions\x18\x02 \x03(\x0b\x32\x89\x01.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0.TokenDistributionEntry\x1a\xc3\x01\n\x12TokenDistributions\x12\xac\x01\n\x13token_distributions\x18\x01 \x03(\x0b\x32\x8e\x01.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0.TokenTimedDistributionEntryB\x08\n\x06resultB\t\n\x07version\"\xca\x01\n\x1aGetTokenTotalSupplyRequest\x12`\n\x02v0\x18\x01 \x01(\x0b\x32R.org.dash.platform.dapi.v0.GetTokenTotalSupplyRequest.GetTokenTotalSupplyRequestV0H\x00\x1a?\n\x1cGetTokenTotalSupplyRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xaf\x04\n\x1bGetTokenTotalSupplyResponse\x12\x62\n\x02v0\x18\x01 
\x01(\x0b\x32T.org.dash.platform.dapi.v0.GetTokenTotalSupplyResponse.GetTokenTotalSupplyResponseV0H\x00\x1a\xa0\x03\n\x1dGetTokenTotalSupplyResponseV0\x12\x88\x01\n\x12token_total_supply\x18\x01 \x01(\x0b\x32j.org.dash.platform.dapi.v0.GetTokenTotalSupplyResponse.GetTokenTotalSupplyResponseV0.TokenTotalSupplyEntryH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1ax\n\x15TokenTotalSupplyEntry\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x30\n(total_aggregated_amount_in_user_accounts\x18\x02 \x01(\x04\x12\x1b\n\x13total_system_amount\x18\x03 \x01(\x04\x42\x08\n\x06resultB\t\n\x07version\"\xd2\x01\n\x13GetGroupInfoRequest\x12R\n\x02v0\x18\x01 \x01(\x0b\x32\x44.org.dash.platform.dapi.v0.GetGroupInfoRequest.GetGroupInfoRequestV0H\x00\x1a\\\n\x15GetGroupInfoRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1f\n\x17group_contract_position\x18\x02 \x01(\r\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xd4\x05\n\x14GetGroupInfoResponse\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0H\x00\x1a\xda\x04\n\x16GetGroupInfoResponseV0\x12\x66\n\ngroup_info\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0.GroupInfoH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x04 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x34\n\x10GroupMemberEntry\x12\x11\n\tmember_id\x18\x01 \x01(\x0c\x12\r\n\x05power\x18\x02 \x01(\r\x1a\x98\x01\n\x0eGroupInfoEntry\x12h\n\x07members\x18\x01 \x03(\x0b\x32W.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0.GroupMemberEntry\x12\x1c\n\x14group_required_power\x18\x02 \x01(\r\x1a\x8a\x01\n\tGroupInfo\x12n\n\ngroup_info\x18\x01 
\x01(\x0b\x32U.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0.GroupInfoEntryH\x00\x88\x01\x01\x42\r\n\x0b_group_infoB\x08\n\x06resultB\t\n\x07version\"\xed\x03\n\x14GetGroupInfosRequest\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetGroupInfosRequest.GetGroupInfosRequestV0H\x00\x1au\n\x1cStartAtGroupContractPosition\x12%\n\x1dstart_group_contract_position\x18\x01 \x01(\r\x12.\n&start_group_contract_position_included\x18\x02 \x01(\x08\x1a\xfc\x01\n\x16GetGroupInfosRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12{\n start_at_group_contract_position\x18\x02 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetGroupInfosRequest.StartAtGroupContractPositionH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\x04 \x01(\x08\x42#\n!_start_at_group_contract_positionB\x08\n\x06_countB\t\n\x07version\"\xff\x05\n\x15GetGroupInfosResponse\x12V\n\x02v0\x18\x01 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0H\x00\x1a\x82\x05\n\x17GetGroupInfosResponseV0\x12j\n\x0bgroup_infos\x18\x01 \x01(\x0b\x32S.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0.GroupInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x04 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x34\n\x10GroupMemberEntry\x12\x11\n\tmember_id\x18\x01 \x01(\x0c\x12\r\n\x05power\x18\x02 \x01(\r\x1a\xc3\x01\n\x16GroupPositionInfoEntry\x12\x1f\n\x17group_contract_position\x18\x01 \x01(\r\x12j\n\x07members\x18\x02 \x03(\x0b\x32Y.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0.GroupMemberEntry\x12\x1c\n\x14group_required_power\x18\x03 \x01(\r\x1a\x82\x01\n\nGroupInfos\x12t\n\x0bgroup_infos\x18\x01 
\x03(\x0b\x32_.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0.GroupPositionInfoEntryB\x08\n\x06resultB\t\n\x07version\"\xbe\x04\n\x16GetGroupActionsRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetGroupActionsRequest.GetGroupActionsRequestV0H\x00\x1aL\n\x0fStartAtActionId\x12\x17\n\x0fstart_action_id\x18\x01 \x01(\x0c\x12 \n\x18start_action_id_included\x18\x02 \x01(\x08\x1a\xc8\x02\n\x18GetGroupActionsRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1f\n\x17group_contract_position\x18\x02 \x01(\r\x12N\n\x06status\x18\x03 \x01(\x0e\x32>.org.dash.platform.dapi.v0.GetGroupActionsRequest.ActionStatus\x12\x62\n\x12start_at_action_id\x18\x04 \x01(\x0b\x32\x41.org.dash.platform.dapi.v0.GetGroupActionsRequest.StartAtActionIdH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x05 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\x06 \x01(\x08\x42\x15\n\x13_start_at_action_idB\x08\n\x06_count\"&\n\x0c\x41\x63tionStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\n\n\x06\x43LOSED\x10\x01\x42\t\n\x07version\"\xeb\x1d\n\x17GetGroupActionsResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0H\x00\x1a\xe8\x1c\n\x19GetGroupActionsResponseV0\x12r\n\rgroup_actions\x18\x01 \x01(\x0b\x32Y.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a[\n\tMintEvent\x12\x0e\n\x06\x61mount\x18\x01 \x01(\x04\x12\x14\n\x0crecipient_id\x18\x02 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\x45\n\tBurnEvent\x12\x0e\n\x06\x61mount\x18\x01 \x01(\x04\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1aJ\n\x0b\x46reezeEvent\x12\x11\n\tfrozen_id\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 
\x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1aL\n\rUnfreezeEvent\x12\x11\n\tfrozen_id\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\x66\n\x17\x44\x65stroyFrozenFundsEvent\x12\x11\n\tfrozen_id\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x04\x12\x18\n\x0bpublic_note\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\x64\n\x13SharedEncryptedNote\x12\x18\n\x10sender_key_index\x18\x01 \x01(\r\x12\x1b\n\x13recipient_key_index\x18\x02 \x01(\r\x12\x16\n\x0e\x65ncrypted_data\x18\x03 \x01(\x0c\x1a{\n\x15PersonalEncryptedNote\x12!\n\x19root_encryption_key_index\x18\x01 \x01(\r\x12\'\n\x1f\x64\x65rivation_encryption_key_index\x18\x02 \x01(\r\x12\x16\n\x0e\x65ncrypted_data\x18\x03 \x01(\x0c\x1a\xa7\x03\n\rTransferEvent\x12\x14\n\x0crecipient_id\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x84\x01\n\x15shared_encrypted_note\x18\x03 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNoteH\x01\x88\x01\x01\x12\x88\x01\n\x17personal_encrypted_note\x18\x04 \x01(\x0b\x32\x62.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNoteH\x02\x88\x01\x01\x12\x0e\n\x06\x61mount\x18\x05 \x01(\x04\x42\x0e\n\x0c_public_noteB\x18\n\x16_shared_encrypted_noteB\x1a\n\x18_personal_encrypted_note\x1a\xe9\x01\n\x14\x45mergencyActionEvent\x12\x81\x01\n\x0b\x61\x63tion_type\x18\x01 \x01(\x0e\x32l.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.ActionType\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\"#\n\nActionType\x12\t\n\x05PAUSE\x10\x00\x12\n\n\x06RESUME\x10\x01\x42\x0e\n\x0c_public_note\x1a\x64\n\x16TokenConfigUpdateEvent\x12 \n\x18token_config_update_item\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\xfc\x02\n\x10GroupActionEvent\x12n\n\x0btoken_event\x18\x01 
\x01(\x0b\x32W.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEventH\x00\x12t\n\x0e\x64ocument_event\x18\x02 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DocumentEventH\x00\x12t\n\x0e\x63ontract_event\x18\x03 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ContractEventH\x00\x42\x0c\n\nevent_type\x1a\x8b\x01\n\rDocumentEvent\x12r\n\x06\x63reate\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DocumentCreateEventH\x00\x42\x06\n\x04type\x1a/\n\x13\x44ocumentCreateEvent\x12\x18\n\x10\x63reated_document\x18\x01 \x01(\x0c\x1a/\n\x13\x43ontractUpdateEvent\x12\x18\n\x10updated_contract\x18\x01 \x01(\x0c\x1a\x8b\x01\n\rContractEvent\x12r\n\x06update\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ContractUpdateEventH\x00\x42\x06\n\x04type\x1a\xbb\x07\n\nTokenEvent\x12\x66\n\x04mint\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.MintEventH\x00\x12\x66\n\x04\x62urn\x18\x02 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.BurnEventH\x00\x12j\n\x06\x66reeze\x18\x03 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.FreezeEventH\x00\x12n\n\x08unfreeze\x18\x04 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEventH\x00\x12\x84\x01\n\x14\x64\x65stroy_frozen_funds\x18\x05 \x01(\x0b\x32\x64.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEventH\x00\x12n\n\x08transfer\x18\x06 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEventH\x00\x12}\n\x10\x65mergency_action\x18\x07 
\x01(\x0b\x32\x61.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEventH\x00\x12\x82\x01\n\x13token_config_update\x18\x08 \x01(\x0b\x32\x63.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEventH\x00\x42\x06\n\x04type\x1a\x93\x01\n\x10GroupActionEntry\x12\x11\n\taction_id\x18\x01 \x01(\x0c\x12l\n\x05\x65vent\x18\x02 \x01(\x0b\x32].org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionEvent\x1a\x84\x01\n\x0cGroupActions\x12t\n\rgroup_actions\x18\x01 \x03(\x0b\x32].org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionEntryB\x08\n\x06resultB\t\n\x07version\"\x88\x03\n\x1cGetGroupActionSignersRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetGroupActionSignersRequest.GetGroupActionSignersRequestV0H\x00\x1a\xce\x01\n\x1eGetGroupActionSignersRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1f\n\x17group_contract_position\x18\x02 \x01(\r\x12T\n\x06status\x18\x03 \x01(\x0e\x32\x44.org.dash.platform.dapi.v0.GetGroupActionSignersRequest.ActionStatus\x12\x11\n\taction_id\x18\x04 \x01(\x0c\x12\r\n\x05prove\x18\x05 \x01(\x08\"&\n\x0c\x41\x63tionStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\n\n\x06\x43LOSED\x10\x01\x42\t\n\x07version\"\x8b\x05\n\x1dGetGroupActionSignersResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetGroupActionSignersResponse.GetGroupActionSignersResponseV0H\x00\x1a\xf6\x03\n\x1fGetGroupActionSignersResponseV0\x12\x8b\x01\n\x14group_action_signers\x18\x01 \x01(\x0b\x32k.org.dash.platform.dapi.v0.GetGroupActionSignersResponse.GetGroupActionSignersResponseV0.GroupActionSignersH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x35\n\x11GroupActionSigner\x12\x11\n\tsigner_id\x18\x01 \x01(\x0c\x12\r\n\x05power\x18\x02 
\x01(\r\x1a\x91\x01\n\x12GroupActionSigners\x12{\n\x07signers\x18\x01 \x03(\x0b\x32j.org.dash.platform.dapi.v0.GetGroupActionSignersResponse.GetGroupActionSignersResponseV0.GroupActionSignerB\x08\n\x06resultB\t\n\x07version*Z\n\nKeyPurpose\x12\x12\n\x0e\x41UTHENTICATION\x10\x00\x12\x0e\n\nENCRYPTION\x10\x01\x12\x0e\n\nDECRYPTION\x10\x02\x12\x0c\n\x08TRANSFER\x10\x03\x12\n\n\x06VOTING\x10\x05\x32\x9a\x30\n\x08Platform\x12\x93\x01\n\x18\x62roadcastStateTransition\x12:.org.dash.platform.dapi.v0.BroadcastStateTransitionRequest\x1a;.org.dash.platform.dapi.v0.BroadcastStateTransitionResponse\x12l\n\x0bgetIdentity\x12-.org.dash.platform.dapi.v0.GetIdentityRequest\x1a..org.dash.platform.dapi.v0.GetIdentityResponse\x12x\n\x0fgetIdentityKeys\x12\x31.org.dash.platform.dapi.v0.GetIdentityKeysRequest\x1a\x32.org.dash.platform.dapi.v0.GetIdentityKeysResponse\x12\x96\x01\n\x19getIdentitiesContractKeys\x12;.org.dash.platform.dapi.v0.GetIdentitiesContractKeysRequest\x1a<.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse\x12{\n\x10getIdentityNonce\x12\x32.org.dash.platform.dapi.v0.GetIdentityNonceRequest\x1a\x33.org.dash.platform.dapi.v0.GetIdentityNonceResponse\x12\x93\x01\n\x18getIdentityContractNonce\x12:.org.dash.platform.dapi.v0.GetIdentityContractNonceRequest\x1a;.org.dash.platform.dapi.v0.GetIdentityContractNonceResponse\x12\x81\x01\n\x12getIdentityBalance\x12\x34.org.dash.platform.dapi.v0.GetIdentityBalanceRequest\x1a\x35.org.dash.platform.dapi.v0.GetIdentityBalanceResponse\x12\x8a\x01\n\x15getIdentitiesBalances\x12\x37.org.dash.platform.dapi.v0.GetIdentitiesBalancesRequest\x1a\x38.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse\x12\xa2\x01\n\x1dgetIdentityBalanceAndRevision\x12?.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionRequest\x1a@.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionResponse\x12\xaf\x01\n#getEvonodesProposedEpochBlocksByIds\x12\x45.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByIdsRequest\x1a\x41.org.dash.platfor
m.dapi.v0.GetEvonodesProposedEpochBlocksResponse\x12\xb3\x01\n%getEvonodesProposedEpochBlocksByRange\x12G.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByRangeRequest\x1a\x41.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse\x12\x66\n\tgetProofs\x12+.org.dash.platform.dapi.v0.GetProofsRequest\x1a,.org.dash.platform.dapi.v0.GetProofsResponse\x12x\n\x0fgetDataContract\x12\x31.org.dash.platform.dapi.v0.GetDataContractRequest\x1a\x32.org.dash.platform.dapi.v0.GetDataContractResponse\x12\x8d\x01\n\x16getDataContractHistory\x12\x38.org.dash.platform.dapi.v0.GetDataContractHistoryRequest\x1a\x39.org.dash.platform.dapi.v0.GetDataContractHistoryResponse\x12{\n\x10getDataContracts\x12\x32.org.dash.platform.dapi.v0.GetDataContractsRequest\x1a\x33.org.dash.platform.dapi.v0.GetDataContractsResponse\x12o\n\x0cgetDocuments\x12..org.dash.platform.dapi.v0.GetDocumentsRequest\x1a/.org.dash.platform.dapi.v0.GetDocumentsResponse\x12\x99\x01\n\x1agetIdentityByPublicKeyHash\x12<.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashRequest\x1a=.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashResponse\x12\x9f\x01\n\x1cwaitForStateTransitionResult\x12>.org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest\x1a?.org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse\x12\x81\x01\n\x12getConsensusParams\x12\x34.org.dash.platform.dapi.v0.GetConsensusParamsRequest\x1a\x35.org.dash.platform.dapi.v0.GetConsensusParamsResponse\x12\xa5\x01\n\x1egetProtocolVersionUpgradeState\x12@.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateRequest\x1a\x41.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse\x12\xb4\x01\n#getProtocolVersionUpgradeVoteStatus\x12\x45.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusRequest\x1a\x46.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse\x12r\n\rgetEpochsInfo\x12/.org.dash.platform.dapi.v0.GetEpochsInfoRequest\x1a\x30.org.dash.platform.dapi.v0.GetEpochsInfoResponse\x12\x8a\x01\n\
x15getContestedResources\x12\x37.org.dash.platform.dapi.v0.GetContestedResourcesRequest\x1a\x38.org.dash.platform.dapi.v0.GetContestedResourcesResponse\x12\xa2\x01\n\x1dgetContestedResourceVoteState\x12?.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest\x1a@.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse\x12\xba\x01\n%getContestedResourceVotersForIdentity\x12G.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityRequest\x1aH.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityResponse\x12\xae\x01\n!getContestedResourceIdentityVotes\x12\x43.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesRequest\x1a\x44.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse\x12\x8a\x01\n\x15getVotePollsByEndDate\x12\x37.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest\x1a\x38.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse\x12\xa5\x01\n\x1egetPrefundedSpecializedBalance\x12@.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceRequest\x1a\x41.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceResponse\x12\x96\x01\n\x19getTotalCreditsInPlatform\x12;.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformRequest\x1a<.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformResponse\x12x\n\x0fgetPathElements\x12\x31.org.dash.platform.dapi.v0.GetPathElementsRequest\x1a\x32.org.dash.platform.dapi.v0.GetPathElementsResponse\x12\x66\n\tgetStatus\x12+.org.dash.platform.dapi.v0.GetStatusRequest\x1a,.org.dash.platform.dapi.v0.GetStatusResponse\x12\x8a\x01\n\x15getCurrentQuorumsInfo\x12\x37.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoRequest\x1a\x38.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse\x12\x93\x01\n\x18getIdentityTokenBalances\x12:.org.dash.platform.dapi.v0.GetIdentityTokenBalancesRequest\x1a;.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse\x12\x99\x01\n\x1agetIdentitiesTokenBalances\x12<.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesRequest\x1a=.org.dash.platform.dapi.v
0.GetIdentitiesTokenBalancesResponse\x12\x8a\x01\n\x15getIdentityTokenInfos\x12\x37.org.dash.platform.dapi.v0.GetIdentityTokenInfosRequest\x1a\x38.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse\x12\x90\x01\n\x17getIdentitiesTokenInfos\x12\x39.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosRequest\x1a:.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse\x12{\n\x10getTokenStatuses\x12\x32.org.dash.platform.dapi.v0.GetTokenStatusesRequest\x1a\x33.org.dash.platform.dapi.v0.GetTokenStatusesResponse\x12\xb1\x01\n\"getTokenPreProgrammedDistributions\x12\x44.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsRequest\x1a\x45.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse\x12\x84\x01\n\x13getTokenTotalSupply\x12\x35.org.dash.platform.dapi.v0.GetTokenTotalSupplyRequest\x1a\x36.org.dash.platform.dapi.v0.GetTokenTotalSupplyResponse\x12o\n\x0cgetGroupInfo\x12..org.dash.platform.dapi.v0.GetGroupInfoRequest\x1a/.org.dash.platform.dapi.v0.GetGroupInfoResponse\x12r\n\rgetGroupInfos\x12/.org.dash.platform.dapi.v0.GetGroupInfosRequest\x1a\x30.org.dash.platform.dapi.v0.GetGroupInfosResponse\x12x\n\x0fgetGroupActions\x12\x31.org.dash.platform.dapi.v0.GetGroupActionsRequest\x1a\x32.org.dash.platform.dapi.v0.GetGroupActionsResponse\x12\x8a\x01\n\x15getGroupActionSigners\x12\x37.org.dash.platform.dapi.v0.GetGroupActionSignersRequest\x1a\x38.org.dash.platform.dapi.v0.GetGroupActionSignersResponseb\x06proto3' + serialized_pb=b'\n\x0eplatform.proto\x12\x19org.dash.platform.dapi.v0\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x81\x01\n\x05Proof\x12\x15\n\rgrovedb_proof\x18\x01 \x01(\x0c\x12\x13\n\x0bquorum_hash\x18\x02 \x01(\x0c\x12\x11\n\tsignature\x18\x03 \x01(\x0c\x12\r\n\x05round\x18\x04 \x01(\r\x12\x15\n\rblock_id_hash\x18\x05 \x01(\x0c\x12\x13\n\x0bquorum_type\x18\x06 \x01(\r\"\x98\x01\n\x10ResponseMetadata\x12\x12\n\x06height\x18\x01 \x01(\x04\x42\x02\x30\x01\x12 
\n\x18\x63ore_chain_locked_height\x18\x02 \x01(\r\x12\r\n\x05\x65poch\x18\x03 \x01(\r\x12\x13\n\x07time_ms\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x18\n\x10protocol_version\x18\x05 \x01(\r\x12\x10\n\x08\x63hain_id\x18\x06 \x01(\t\"L\n\x1dStateTransitionBroadcastError\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\";\n\x1f\x42roadcastStateTransitionRequest\x12\x18\n\x10state_transition\x18\x01 \x01(\x0c\"\"\n BroadcastStateTransitionResponse\"\xa4\x01\n\x12GetIdentityRequest\x12P\n\x02v0\x18\x01 \x01(\x0b\x32\x42.org.dash.platform.dapi.v0.GetIdentityRequest.GetIdentityRequestV0H\x00\x1a\x31\n\x14GetIdentityRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xc1\x01\n\x17GetIdentityNonceRequest\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetIdentityNonceRequest.GetIdentityNonceRequestV0H\x00\x1a?\n\x19GetIdentityNonceRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xf6\x01\n\x1fGetIdentityContractNonceRequest\x12j\n\x02v0\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetIdentityContractNonceRequest.GetIdentityContractNonceRequestV0H\x00\x1a\\\n!GetIdentityContractNonceRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x13\n\x0b\x63ontract_id\x18\x02 \x01(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xc0\x01\n\x19GetIdentityBalanceRequest\x12^\n\x02v0\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetIdentityBalanceRequest.GetIdentityBalanceRequestV0H\x00\x1a\x38\n\x1bGetIdentityBalanceRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xec\x01\n$GetIdentityBalanceAndRevisionRequest\x12t\n\x02v0\x18\x01 \x01(\x0b\x32\x66.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionRequest.GetIdentityBalanceAndRevisionRequestV0H\x00\x1a\x43\n&GetIdentityBalanceAndRevisionRequestV0\x12\n\n\x02id\x18\x01 
\x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x9e\x02\n\x13GetIdentityResponse\x12R\n\x02v0\x18\x01 \x01(\x0b\x32\x44.org.dash.platform.dapi.v0.GetIdentityResponse.GetIdentityResponseV0H\x00\x1a\xa7\x01\n\x15GetIdentityResponseV0\x12\x12\n\x08identity\x18\x01 \x01(\x0cH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xbc\x02\n\x18GetIdentityNonceResponse\x12\\\n\x02v0\x18\x01 \x01(\x0b\x32N.org.dash.platform.dapi.v0.GetIdentityNonceResponse.GetIdentityNonceResponseV0H\x00\x1a\xb6\x01\n\x1aGetIdentityNonceResponseV0\x12\x1c\n\x0eidentity_nonce\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xe5\x02\n GetIdentityContractNonceResponse\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetIdentityContractNonceResponse.GetIdentityContractNonceResponseV0H\x00\x1a\xc7\x01\n\"GetIdentityContractNonceResponseV0\x12%\n\x17identity_contract_nonce\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xbd\x02\n\x1aGetIdentityBalanceResponse\x12`\n\x02v0\x18\x01 \x01(\x0b\x32R.org.dash.platform.dapi.v0.GetIdentityBalanceResponse.GetIdentityBalanceResponseV0H\x00\x1a\xb1\x01\n\x1cGetIdentityBalanceResponseV0\x12\x15\n\x07\x62\x61lance\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xb1\x04\n%GetIdentityBalanceAndRevisionResponse\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionResponse.GetIdentityBalanceAndRevisionResponseV0H\x00\x1a\x84\x03\n\'GetIdentityBalanceAndRevisionResponseV0\x12\x9b\x01\n\x14\x62\x61lance_and_revision\x18\x01 \x01(\x0b\x32{.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionResponse.GetIdentityBalanceAndRevisionResponseV0.BalanceAndRevisionH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a?\n\x12\x42\x61lanceAndRevision\x12\x13\n\x07\x62\x61lance\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x14\n\x08revision\x18\x02 \x01(\x04\x42\x02\x30\x01\x42\x08\n\x06resultB\t\n\x07version\"\xd1\x01\n\x0eKeyRequestType\x12\x36\n\x08\x61ll_keys\x18\x01 \x01(\x0b\x32\".org.dash.platform.dapi.v0.AllKeysH\x00\x12@\n\rspecific_keys\x18\x02 \x01(\x0b\x32\'.org.dash.platform.dapi.v0.SpecificKeysH\x00\x12:\n\nsearch_key\x18\x03 \x01(\x0b\x32$.org.dash.platform.dapi.v0.SearchKeyH\x00\x42\t\n\x07request\"\t\n\x07\x41llKeys\"\x1f\n\x0cSpecificKeys\x12\x0f\n\x07key_ids\x18\x01 \x03(\r\"\xb6\x01\n\tSearchKey\x12I\n\x0bpurpose_map\x18\x01 \x03(\x0b\x32\x34.org.dash.platform.dapi.v0.SearchKey.PurposeMapEntry\x1a^\n\x0fPurposeMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12:\n\x05value\x18\x02 \x01(\x0b\x32+.org.dash.platform.dapi.v0.SecurityLevelMap:\x02\x38\x01\"\xbf\x02\n\x10SecurityLevelMap\x12]\n\x12security_level_map\x18\x01 \x03(\x0b\x32\x41.org.dash.platform.dapi.v0.SecurityLevelMap.SecurityLevelMapEntry\x1aw\n\x15SecurityLevelMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12M\n\x05value\x18\x02 
\x01(\x0e\x32>.org.dash.platform.dapi.v0.SecurityLevelMap.KeyKindRequestType:\x02\x38\x01\"S\n\x12KeyKindRequestType\x12\x1f\n\x1b\x43URRENT_KEY_OF_KIND_REQUEST\x10\x00\x12\x1c\n\x18\x41LL_KEYS_OF_KIND_REQUEST\x10\x01\"\xda\x02\n\x16GetIdentityKeysRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetIdentityKeysRequest.GetIdentityKeysRequestV0H\x00\x1a\xda\x01\n\x18GetIdentityKeysRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12?\n\x0crequest_type\x18\x02 \x01(\x0b\x32).org.dash.platform.dapi.v0.KeyRequestType\x12+\n\x05limit\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12,\n\x06offset\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\t\n\x07version\"\x99\x03\n\x17GetIdentityKeysResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetIdentityKeysResponse.GetIdentityKeysResponseV0H\x00\x1a\x96\x02\n\x19GetIdentityKeysResponseV0\x12\x61\n\x04keys\x18\x01 \x01(\x0b\x32Q.org.dash.platform.dapi.v0.GetIdentityKeysResponse.GetIdentityKeysResponseV0.KeysH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x1a\n\x04Keys\x12\x12\n\nkeys_bytes\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\xef\x02\n GetIdentitiesContractKeysRequest\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetIdentitiesContractKeysRequest.GetIdentitiesContractKeysRequestV0H\x00\x1a\xd1\x01\n\"GetIdentitiesContractKeysRequestV0\x12\x16\n\x0eidentities_ids\x18\x01 \x03(\x0c\x12\x13\n\x0b\x63ontract_id\x18\x02 \x01(\x0c\x12\x1f\n\x12\x64ocument_type_name\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x37\n\x08purposes\x18\x04 \x03(\x0e\x32%.org.dash.platform.dapi.v0.KeyPurpose\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\x15\n\x13_document_type_nameB\t\n\x07version\"\xdf\x06\n!GetIdentitiesContractKeysResponse\x12n\n\x02v0\x18\x01 
\x01(\x0b\x32`.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0H\x00\x1a\xbe\x05\n#GetIdentitiesContractKeysResponseV0\x12\x8a\x01\n\x0fidentities_keys\x18\x01 \x01(\x0b\x32o.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0.IdentitiesKeysH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aY\n\x0bPurposeKeys\x12\x36\n\x07purpose\x18\x01 \x01(\x0e\x32%.org.dash.platform.dapi.v0.KeyPurpose\x12\x12\n\nkeys_bytes\x18\x02 \x03(\x0c\x1a\x9f\x01\n\x0cIdentityKeys\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12z\n\x04keys\x18\x02 \x03(\x0b\x32l.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0.PurposeKeys\x1a\x90\x01\n\x0eIdentitiesKeys\x12~\n\x07\x65ntries\x18\x01 \x03(\x0b\x32m.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse.GetIdentitiesContractKeysResponseV0.IdentityKeysB\x08\n\x06resultB\t\n\x07version\"\xa4\x02\n*GetEvonodesProposedEpochBlocksByIdsRequest\x12\x80\x01\n\x02v0\x18\x01 \x01(\x0b\x32r.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByIdsRequest.GetEvonodesProposedEpochBlocksByIdsRequestV0H\x00\x1ah\n,GetEvonodesProposedEpochBlocksByIdsRequestV0\x12\x12\n\x05\x65poch\x18\x01 \x01(\rH\x00\x88\x01\x01\x12\x0b\n\x03ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\x08\n\x06_epochB\t\n\x07version\"\x92\x06\n&GetEvonodesProposedEpochBlocksResponse\x12x\n\x02v0\x18\x01 \x01(\x0b\x32j.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse.GetEvonodesProposedEpochBlocksResponseV0H\x00\x1a\xe2\x04\n(GetEvonodesProposedEpochBlocksResponseV0\x12\xb1\x01\n#evonodes_proposed_block_counts_info\x18\x01 
\x01(\x0b\x32\x81\x01.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse.GetEvonodesProposedEpochBlocksResponseV0.EvonodesProposedBlocksH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a?\n\x15\x45vonodeProposedBlocks\x12\x13\n\x0bpro_tx_hash\x18\x01 \x01(\x0c\x12\x11\n\x05\x63ount\x18\x02 \x01(\x04\x42\x02\x30\x01\x1a\xc4\x01\n\x16\x45vonodesProposedBlocks\x12\xa9\x01\n\x1e\x65vonodes_proposed_block_counts\x18\x01 \x03(\x0b\x32\x80\x01.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse.GetEvonodesProposedEpochBlocksResponseV0.EvonodeProposedBlocksB\x08\n\x06resultB\t\n\x07version\"\xf2\x02\n,GetEvonodesProposedEpochBlocksByRangeRequest\x12\x84\x01\n\x02v0\x18\x01 \x01(\x0b\x32v.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByRangeRequest.GetEvonodesProposedEpochBlocksByRangeRequestV0H\x00\x1a\xaf\x01\n.GetEvonodesProposedEpochBlocksByRangeRequestV0\x12\x12\n\x05\x65poch\x18\x01 \x01(\rH\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x02 \x01(\rH\x02\x88\x01\x01\x12\x15\n\x0bstart_after\x18\x03 \x01(\x0cH\x00\x12\x12\n\x08start_at\x18\x04 \x01(\x0cH\x00\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\x07\n\x05startB\x08\n\x06_epochB\x08\n\x06_limitB\t\n\x07version\"\xcd\x01\n\x1cGetIdentitiesBalancesRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetIdentitiesBalancesRequest.GetIdentitiesBalancesRequestV0H\x00\x1a<\n\x1eGetIdentitiesBalancesRequestV0\x12\x0b\n\x03ids\x18\x01 \x03(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x9f\x05\n\x1dGetIdentitiesBalancesResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse.GetIdentitiesBalancesResponseV0H\x00\x1a\x8a\x04\n\x1fGetIdentitiesBalancesResponseV0\x12\x8a\x01\n\x13identities_balances\x18\x01 
\x01(\x0b\x32k.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse.GetIdentitiesBalancesResponseV0.IdentitiesBalancesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aL\n\x0fIdentityBalance\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x18\n\x07\x62\x61lance\x18\x02 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01\x42\n\n\x08_balance\x1a\x8f\x01\n\x12IdentitiesBalances\x12y\n\x07\x65ntries\x18\x01 \x03(\x0b\x32h.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse.GetIdentitiesBalancesResponseV0.IdentityBalanceB\x08\n\x06resultB\t\n\x07version\"\xdb\x0f\n\x10GetProofsRequest\x12L\n\x02v0\x18\x01 \x01(\x0b\x32>.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0H\x00\x1a\xed\x0e\n\x12GetProofsRequestV0\x12\x62\n\nidentities\x18\x01 \x03(\x0b\x32N.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityRequest\x12\x61\n\tcontracts\x18\x02 \x03(\x0b\x32N.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.ContractRequest\x12\x61\n\tdocuments\x18\x03 \x03(\x0b\x32N.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.DocumentRequest\x12_\n\x05votes\x18\x04 \x03(\x0b\x32P.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.VoteStatusRequest\x12{\n\x17identity_token_balances\x18\x05 \x03(\x0b\x32Z.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityTokenBalanceRequest\x12u\n\x14identity_token_infos\x18\x06 \x03(\x0b\x32W.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityTokenInfoRequest\x12i\n\x0etoken_statuses\x18\x07 \x03(\x0b\x32Q.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.TokenStatusRequest\x1a\xd5\x02\n\x0f\x44ocumentRequest\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x15\n\rdocument_type\x18\x02 \x01(\t\x12#\n\x1b\x64ocument_type_keeps_history\x18\x03 \x01(\x08\x12\x13\n\x0b\x64ocument_id\x18\x04 
\x01(\x0c\x12\x89\x01\n\x19\x64ocument_contested_status\x18\x05 \x01(\x0e\x32\x66.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.DocumentRequest.DocumentContestedStatus\"P\n\x17\x44ocumentContestedStatus\x12\x11\n\rNOT_CONTESTED\x10\x00\x12\x13\n\x0fMAYBE_CONTESTED\x10\x01\x12\r\n\tCONTESTED\x10\x02\x1a\xd1\x01\n\x0fIdentityRequest\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12i\n\x0crequest_type\x18\x02 \x01(\x0e\x32S.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.IdentityRequest.Type\">\n\x04Type\x12\x11\n\rFULL_IDENTITY\x10\x00\x12\x0b\n\x07\x42\x41LANCE\x10\x01\x12\x08\n\x04KEYS\x10\x02\x12\x0c\n\x08REVISION\x10\x03\x1a&\n\x0f\x43ontractRequest\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x1a\xe7\x02\n\x11VoteStatusRequest\x12\xa5\x01\n&contested_resource_vote_status_request\x18\x01 \x01(\x0b\x32s.org.dash.platform.dapi.v0.GetProofsRequest.GetProofsRequestV0.VoteStatusRequest.ContestedResourceVoteStatusRequestH\x00\x1a\x99\x01\n\"ContestedResourceVoteStatusRequest\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x14\n\x0cindex_values\x18\x04 \x03(\x0c\x12\x18\n\x10voter_identifier\x18\x05 \x01(\x0c\x42\x0e\n\x0crequest_type\x1a\x44\n\x1bIdentityTokenBalanceRequest\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x13\n\x0bidentity_id\x18\x02 \x01(\x0c\x1a\x41\n\x18IdentityTokenInfoRequest\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x13\n\x0bidentity_id\x18\x02 \x01(\x0c\x1a&\n\x12TokenStatusRequest\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x42\t\n\x07version\"\x82\x02\n\x11GetProofsResponse\x12N\n\x02v0\x18\x01 \x01(\x0b\x32@.org.dash.platform.dapi.v0.GetProofsResponse.GetProofsResponseV0H\x00\x1a\x91\x01\n\x13GetProofsResponseV0\x12\x31\n\x05proof\x18\x01 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x02 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xb4\x01\n\x16GetDataContractRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetDataContractRequest.GetDataContractRequestV0H\x00\x1a\x35\n\x18GetDataContractRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xb3\x02\n\x17GetDataContractResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetDataContractResponse.GetDataContractResponseV0H\x00\x1a\xb0\x01\n\x19GetDataContractResponseV0\x12\x17\n\rdata_contract\x18\x01 \x01(\x0cH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xb9\x01\n\x17GetDataContractsRequest\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetDataContractsRequest.GetDataContractsRequestV0H\x00\x1a\x37\n\x19GetDataContractsRequestV0\x12\x0b\n\x03ids\x18\x01 \x03(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xcf\x04\n\x18GetDataContractsResponse\x12\\\n\x02v0\x18\x01 \x01(\x0b\x32N.org.dash.platform.dapi.v0.GetDataContractsResponse.GetDataContractsResponseV0H\x00\x1a[\n\x11\x44\x61taContractEntry\x12\x12\n\nidentifier\x18\x01 \x01(\x0c\x12\x32\n\rdata_contract\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x1au\n\rDataContracts\x12\x64\n\x15\x64\x61ta_contract_entries\x18\x01 \x03(\x0b\x32\x45.org.dash.platform.dapi.v0.GetDataContractsResponse.DataContractEntry\x1a\xf5\x01\n\x1aGetDataContractsResponseV0\x12[\n\x0e\x64\x61ta_contracts\x18\x01 \x01(\x0b\x32\x41.org.dash.platform.dapi.v0.GetDataContractsResponse.DataContractsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xc5\x02\n\x1dGetDataContractHistoryRequest\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetDataContractHistoryRequest.GetDataContractHistoryRequestV0H\x00\x1a\xb0\x01\n\x1fGetDataContractHistoryRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12+\n\x05limit\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12,\n\x06offset\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x17\n\x0bstart_at_ms\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\r\n\x05prove\x18\x05 \x01(\x08\x42\t\n\x07version\"\xb2\x05\n\x1eGetDataContractHistoryResponse\x12h\n\x02v0\x18\x01 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetDataContractHistoryResponse.GetDataContractHistoryResponseV0H\x00\x1a\x9a\x04\n GetDataContractHistoryResponseV0\x12\x8f\x01\n\x15\x64\x61ta_contract_history\x18\x01 \x01(\x0b\x32n.org.dash.platform.dapi.v0.GetDataContractHistoryResponse.GetDataContractHistoryResponseV0.DataContractHistoryH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a;\n\x18\x44\x61taContractHistoryEntry\x12\x10\n\x04\x64\x61te\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\r\n\x05value\x18\x02 \x01(\x0c\x1a\xaa\x01\n\x13\x44\x61taContractHistory\x12\x92\x01\n\x15\x64\x61ta_contract_entries\x18\x01 \x03(\x0b\x32s.org.dash.platform.dapi.v0.GetDataContractHistoryResponse.GetDataContractHistoryResponseV0.DataContractHistoryEntryB\x08\n\x06resultB\t\n\x07version\"\xb2\x02\n\x13GetDocumentsRequest\x12R\n\x02v0\x18\x01 \x01(\x0b\x32\x44.org.dash.platform.dapi.v0.GetDocumentsRequest.GetDocumentsRequestV0H\x00\x1a\xbb\x01\n\x15GetDocumentsRequestV0\x12\x18\n\x10\x64\x61ta_contract_id\x18\x01 \x01(\x0c\x12\x15\n\rdocument_type\x18\x02 \x01(\t\x12\r\n\x05where\x18\x03 \x01(\x0c\x12\x10\n\x08order_by\x18\x04 \x01(\x0c\x12\r\n\x05limit\x18\x05 \x01(\r\x12\x15\n\x0bstart_after\x18\x06 
\x01(\x0cH\x00\x12\x12\n\x08start_at\x18\x07 \x01(\x0cH\x00\x12\r\n\x05prove\x18\x08 \x01(\x08\x42\x07\n\x05startB\t\n\x07version\"\x95\x03\n\x14GetDocumentsResponse\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetDocumentsResponse.GetDocumentsResponseV0H\x00\x1a\x9b\x02\n\x16GetDocumentsResponseV0\x12\x65\n\tdocuments\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetDocumentsResponse.GetDocumentsResponseV0.DocumentsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x1e\n\tDocuments\x12\x11\n\tdocuments\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\xed\x01\n!GetIdentityByPublicKeyHashRequest\x12n\n\x02v0\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashRequest.GetIdentityByPublicKeyHashRequestV0H\x00\x1aM\n#GetIdentityByPublicKeyHashRequestV0\x12\x17\n\x0fpublic_key_hash\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xda\x02\n\"GetIdentityByPublicKeyHashResponse\x12p\n\x02v0\x18\x01 \x01(\x0b\x32\x62.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashResponse.GetIdentityByPublicKeyHashResponseV0H\x00\x1a\xb6\x01\n$GetIdentityByPublicKeyHashResponseV0\x12\x12\n\x08identity\x18\x01 \x01(\x0cH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xfb\x01\n#WaitForStateTransitionResultRequest\x12r\n\x02v0\x18\x01 \x01(\x0b\x32\x64.org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest.WaitForStateTransitionResultRequestV0H\x00\x1aU\n%WaitForStateTransitionResultRequestV0\x12\x1d\n\x15state_transition_hash\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x99\x03\n$WaitForStateTransitionResultResponse\x12t\n\x02v0\x18\x01 
\x01(\x0b\x32\x66.org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse.WaitForStateTransitionResultResponseV0H\x00\x1a\xef\x01\n&WaitForStateTransitionResultResponseV0\x12I\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x38.org.dash.platform.dapi.v0.StateTransitionBroadcastErrorH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xc4\x01\n\x19GetConsensusParamsRequest\x12^\n\x02v0\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetConsensusParamsRequest.GetConsensusParamsRequestV0H\x00\x1a<\n\x1bGetConsensusParamsRequestV0\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\x9c\x04\n\x1aGetConsensusParamsResponse\x12`\n\x02v0\x18\x01 \x01(\x0b\x32R.org.dash.platform.dapi.v0.GetConsensusParamsResponse.GetConsensusParamsResponseV0H\x00\x1aP\n\x14\x43onsensusParamsBlock\x12\x11\n\tmax_bytes\x18\x01 \x01(\t\x12\x0f\n\x07max_gas\x18\x02 \x01(\t\x12\x14\n\x0ctime_iota_ms\x18\x03 \x01(\t\x1a\x62\n\x17\x43onsensusParamsEvidence\x12\x1a\n\x12max_age_num_blocks\x18\x01 \x01(\t\x12\x18\n\x10max_age_duration\x18\x02 \x01(\t\x12\x11\n\tmax_bytes\x18\x03 \x01(\t\x1a\xda\x01\n\x1cGetConsensusParamsResponseV0\x12Y\n\x05\x62lock\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetConsensusParamsResponse.ConsensusParamsBlock\x12_\n\x08\x65vidence\x18\x02 \x01(\x0b\x32M.org.dash.platform.dapi.v0.GetConsensusParamsResponse.ConsensusParamsEvidenceB\t\n\x07version\"\xe4\x01\n%GetProtocolVersionUpgradeStateRequest\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateRequest.GetProtocolVersionUpgradeStateRequestV0H\x00\x1a\x38\n\'GetProtocolVersionUpgradeStateRequestV0\x12\r\n\x05prove\x18\x01 \x01(\x08\x42\t\n\x07version\"\xb5\x05\n&GetProtocolVersionUpgradeStateResponse\x12x\n\x02v0\x18\x01 
\x01(\x0b\x32j.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse.GetProtocolVersionUpgradeStateResponseV0H\x00\x1a\x85\x04\n(GetProtocolVersionUpgradeStateResponseV0\x12\x87\x01\n\x08versions\x18\x01 \x01(\x0b\x32s.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse.GetProtocolVersionUpgradeStateResponseV0.VersionsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x96\x01\n\x08Versions\x12\x89\x01\n\x08versions\x18\x01 \x03(\x0b\x32w.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse.GetProtocolVersionUpgradeStateResponseV0.VersionEntry\x1a:\n\x0cVersionEntry\x12\x16\n\x0eversion_number\x18\x01 \x01(\r\x12\x12\n\nvote_count\x18\x02 \x01(\rB\x08\n\x06resultB\t\n\x07version\"\xa3\x02\n*GetProtocolVersionUpgradeVoteStatusRequest\x12\x80\x01\n\x02v0\x18\x01 \x01(\x0b\x32r.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusRequest.GetProtocolVersionUpgradeVoteStatusRequestV0H\x00\x1ag\n,GetProtocolVersionUpgradeVoteStatusRequestV0\x12\x19\n\x11start_pro_tx_hash\x18\x01 \x01(\x0c\x12\r\n\x05\x63ount\x18\x02 \x01(\r\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xef\x05\n+GetProtocolVersionUpgradeVoteStatusResponse\x12\x82\x01\n\x02v0\x18\x01 \x01(\x0b\x32t.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse.GetProtocolVersionUpgradeVoteStatusResponseV0H\x00\x1a\xaf\x04\n-GetProtocolVersionUpgradeVoteStatusResponseV0\x12\x98\x01\n\x08versions\x18\x01 \x01(\x0b\x32\x83\x01.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse.GetProtocolVersionUpgradeVoteStatusResponseV0.VersionSignalsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\xaf\x01\n\x0eVersionSignals\x12\x9c\x01\n\x0fversion_signals\x18\x01 
\x03(\x0b\x32\x82\x01.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse.GetProtocolVersionUpgradeVoteStatusResponseV0.VersionSignal\x1a\x35\n\rVersionSignal\x12\x13\n\x0bpro_tx_hash\x18\x01 \x01(\x0c\x12\x0f\n\x07version\x18\x02 \x01(\rB\x08\n\x06resultB\t\n\x07version\"\xf5\x01\n\x14GetEpochsInfoRequest\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetEpochsInfoRequest.GetEpochsInfoRequestV0H\x00\x1a|\n\x16GetEpochsInfoRequestV0\x12\x31\n\x0bstart_epoch\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\r\n\x05\x63ount\x18\x02 \x01(\r\x12\x11\n\tascending\x18\x03 \x01(\x08\x12\r\n\x05prove\x18\x04 \x01(\x08\x42\t\n\x07version\"\x99\x05\n\x15GetEpochsInfoResponse\x12V\n\x02v0\x18\x01 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetEpochsInfoResponse.GetEpochsInfoResponseV0H\x00\x1a\x9c\x04\n\x17GetEpochsInfoResponseV0\x12\x65\n\x06\x65pochs\x18\x01 \x01(\x0b\x32S.org.dash.platform.dapi.v0.GetEpochsInfoResponse.GetEpochsInfoResponseV0.EpochInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1au\n\nEpochInfos\x12g\n\x0b\x65poch_infos\x18\x01 \x03(\x0b\x32R.org.dash.platform.dapi.v0.GetEpochsInfoResponse.GetEpochsInfoResponseV0.EpochInfo\x1a\xa6\x01\n\tEpochInfo\x12\x0e\n\x06number\x18\x01 \x01(\r\x12\x1e\n\x12\x66irst_block_height\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x1f\n\x17\x66irst_core_block_height\x18\x03 \x01(\r\x12\x16\n\nstart_time\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x16\n\x0e\x66\x65\x65_multiplier\x18\x05 \x01(\x01\x12\x18\n\x10protocol_version\x18\x06 \x01(\rB\x08\n\x06resultB\t\n\x07version\"\xde\x04\n\x1cGetContestedResourcesRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetContestedResourcesRequest.GetContestedResourcesRequestV0H\x00\x1a\xcc\x03\n\x1eGetContestedResourcesRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 
\x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x1a\n\x12start_index_values\x18\x04 \x03(\x0c\x12\x18\n\x10\x65nd_index_values\x18\x05 \x03(\x0c\x12\x89\x01\n\x13start_at_value_info\x18\x06 \x01(\x0b\x32g.org.dash.platform.dapi.v0.GetContestedResourcesRequest.GetContestedResourcesRequestV0.StartAtValueInfoH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x07 \x01(\rH\x01\x88\x01\x01\x12\x17\n\x0forder_ascending\x18\x08 \x01(\x08\x12\r\n\x05prove\x18\t \x01(\x08\x1a\x45\n\x10StartAtValueInfo\x12\x13\n\x0bstart_value\x18\x01 \x01(\x0c\x12\x1c\n\x14start_value_included\x18\x02 \x01(\x08\x42\x16\n\x14_start_at_value_infoB\x08\n\x06_countB\t\n\x07version\"\x88\x04\n\x1dGetContestedResourcesResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetContestedResourcesResponse.GetContestedResourcesResponseV0H\x00\x1a\xf3\x02\n\x1fGetContestedResourcesResponseV0\x12\x95\x01\n\x19\x63ontested_resource_values\x18\x01 \x01(\x0b\x32p.org.dash.platform.dapi.v0.GetContestedResourcesResponse.GetContestedResourcesResponseV0.ContestedResourceValuesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a<\n\x17\x43ontestedResourceValues\x12!\n\x19\x63ontested_resource_values\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\xd2\x05\n\x1cGetVotePollsByEndDateRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest.GetVotePollsByEndDateRequestV0H\x00\x1a\xc0\x04\n\x1eGetVotePollsByEndDateRequestV0\x12\x84\x01\n\x0fstart_time_info\x18\x01 \x01(\x0b\x32\x66.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest.GetVotePollsByEndDateRequestV0.StartAtTimeInfoH\x00\x88\x01\x01\x12\x80\x01\n\rend_time_info\x18\x02 
\x01(\x0b\x32\x64.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest.GetVotePollsByEndDateRequestV0.EndAtTimeInfoH\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x03 \x01(\rH\x02\x88\x01\x01\x12\x13\n\x06offset\x18\x04 \x01(\rH\x03\x88\x01\x01\x12\x11\n\tascending\x18\x05 \x01(\x08\x12\r\n\x05prove\x18\x06 \x01(\x08\x1aI\n\x0fStartAtTimeInfo\x12\x19\n\rstart_time_ms\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x1b\n\x13start_time_included\x18\x02 \x01(\x08\x1a\x43\n\rEndAtTimeInfo\x12\x17\n\x0b\x65nd_time_ms\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x19\n\x11\x65nd_time_included\x18\x02 \x01(\x08\x42\x12\n\x10_start_time_infoB\x10\n\x0e_end_time_infoB\x08\n\x06_limitB\t\n\x07_offsetB\t\n\x07version\"\x83\x06\n\x1dGetVotePollsByEndDateResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse.GetVotePollsByEndDateResponseV0H\x00\x1a\xee\x04\n\x1fGetVotePollsByEndDateResponseV0\x12\x9c\x01\n\x18vote_polls_by_timestamps\x18\x01 \x01(\x0b\x32x.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse.GetVotePollsByEndDateResponseV0.SerializedVotePollsByTimestampsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aV\n\x1eSerializedVotePollsByTimestamp\x12\x15\n\ttimestamp\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x1d\n\x15serialized_vote_polls\x18\x02 \x03(\x0c\x1a\xd7\x01\n\x1fSerializedVotePollsByTimestamps\x12\x99\x01\n\x18vote_polls_by_timestamps\x18\x01 \x03(\x0b\x32w.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse.GetVotePollsByEndDateResponseV0.SerializedVotePollsByTimestamp\x12\x18\n\x10\x66inished_results\x18\x02 \x01(\x08\x42\x08\n\x06resultB\t\n\x07version\"\xff\x06\n$GetContestedResourceVoteStateRequest\x12t\n\x02v0\x18\x01 
\x01(\x0b\x32\x66.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest.GetContestedResourceVoteStateRequestV0H\x00\x1a\xd5\x05\n&GetContestedResourceVoteStateRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x14\n\x0cindex_values\x18\x04 \x03(\x0c\x12\x86\x01\n\x0bresult_type\x18\x05 \x01(\x0e\x32q.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest.GetContestedResourceVoteStateRequestV0.ResultType\x12\x36\n.allow_include_locked_and_abstaining_vote_tally\x18\x06 \x01(\x08\x12\xa3\x01\n\x18start_at_identifier_info\x18\x07 \x01(\x0b\x32|.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest.GetContestedResourceVoteStateRequestV0.StartAtIdentifierInfoH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x08 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\t \x01(\x08\x1aT\n\x15StartAtIdentifierInfo\x12\x18\n\x10start_identifier\x18\x01 \x01(\x0c\x12!\n\x19start_identifier_included\x18\x02 \x01(\x08\"I\n\nResultType\x12\r\n\tDOCUMENTS\x10\x00\x12\x0e\n\nVOTE_TALLY\x10\x01\x12\x1c\n\x18\x44OCUMENTS_AND_VOTE_TALLY\x10\x02\x42\x1b\n\x19_start_at_identifier_infoB\x08\n\x06_countB\t\n\x07version\"\x94\x0c\n%GetContestedResourceVoteStateResponse\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0H\x00\x1a\xe7\n\n\'GetContestedResourceVoteStateResponseV0\x12\xae\x01\n\x1d\x63ontested_resource_contenders\x18\x01 \x01(\x0b\x32\x84\x01.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.ContestedResourceContendersH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\xda\x03\n\x10\x46inishedVoteInfo\x12\xad\x01\n\x15\x66inished_vote_outcome\x18\x01 
\x01(\x0e\x32\x8d\x01.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.FinishedVoteInfo.FinishedVoteOutcome\x12\x1f\n\x12won_by_identity_id\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x12$\n\x18\x66inished_at_block_height\x18\x03 \x01(\x04\x42\x02\x30\x01\x12%\n\x1d\x66inished_at_core_block_height\x18\x04 \x01(\r\x12%\n\x19\x66inished_at_block_time_ms\x18\x05 \x01(\x04\x42\x02\x30\x01\x12\x19\n\x11\x66inished_at_epoch\x18\x06 \x01(\r\"O\n\x13\x46inishedVoteOutcome\x12\x14\n\x10TOWARDS_IDENTITY\x10\x00\x12\n\n\x06LOCKED\x10\x01\x12\x16\n\x12NO_PREVIOUS_WINNER\x10\x02\x42\x15\n\x13_won_by_identity_id\x1a\xc4\x03\n\x1b\x43ontestedResourceContenders\x12\x86\x01\n\ncontenders\x18\x01 \x03(\x0b\x32r.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.Contender\x12\x1f\n\x12\x61\x62stain_vote_tally\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x1c\n\x0flock_vote_tally\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\x9a\x01\n\x12\x66inished_vote_info\x18\x04 \x01(\x0b\x32y.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse.GetContestedResourceVoteStateResponseV0.FinishedVoteInfoH\x02\x88\x01\x01\x42\x15\n\x13_abstain_vote_tallyB\x12\n\x10_lock_vote_tallyB\x15\n\x13_finished_vote_info\x1ak\n\tContender\x12\x12\n\nidentifier\x18\x01 \x01(\x0c\x12\x17\n\nvote_count\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x15\n\x08\x64ocument\x18\x03 \x01(\x0cH\x01\x88\x01\x01\x42\r\n\x0b_vote_countB\x0b\n\t_documentB\x08\n\x06resultB\t\n\x07version\"\xd5\x05\n,GetContestedResourceVotersForIdentityRequest\x12\x84\x01\n\x02v0\x18\x01 \x01(\x0b\x32v.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityRequest.GetContestedResourceVotersForIdentityRequestV0H\x00\x1a\x92\x04\n.GetContestedResourceVotersForIdentityRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\x12\n\nindex_name\x18\x03 \x01(\t\x12\x14\n\x0cindex_values\x18\x04 
\x03(\x0c\x12\x15\n\rcontestant_id\x18\x05 \x01(\x0c\x12\xb4\x01\n\x18start_at_identifier_info\x18\x06 \x01(\x0b\x32\x8c\x01.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityRequest.GetContestedResourceVotersForIdentityRequestV0.StartAtIdentifierInfoH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x07 \x01(\rH\x01\x88\x01\x01\x12\x17\n\x0forder_ascending\x18\x08 \x01(\x08\x12\r\n\x05prove\x18\t \x01(\x08\x1aT\n\x15StartAtIdentifierInfo\x12\x18\n\x10start_identifier\x18\x01 \x01(\x0c\x12!\n\x19start_identifier_included\x18\x02 \x01(\x08\x42\x1b\n\x19_start_at_identifier_infoB\x08\n\x06_countB\t\n\x07version\"\xf1\x04\n-GetContestedResourceVotersForIdentityResponse\x12\x86\x01\n\x02v0\x18\x01 \x01(\x0b\x32x.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityResponse.GetContestedResourceVotersForIdentityResponseV0H\x00\x1a\xab\x03\n/GetContestedResourceVotersForIdentityResponseV0\x12\xb6\x01\n\x19\x63ontested_resource_voters\x18\x01 \x01(\x0b\x32\x90\x01.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityResponse.GetContestedResourceVotersForIdentityResponseV0.ContestedResourceVotersH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x43\n\x17\x43ontestedResourceVoters\x12\x0e\n\x06voters\x18\x01 \x03(\x0c\x12\x18\n\x10\x66inished_results\x18\x02 \x01(\x08\x42\x08\n\x06resultB\t\n\x07version\"\xad\x05\n(GetContestedResourceIdentityVotesRequest\x12|\n\x02v0\x18\x01 \x01(\x0b\x32n.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesRequest.GetContestedResourceIdentityVotesRequestV0H\x00\x1a\xf7\x03\n*GetContestedResourceIdentityVotesRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12+\n\x05limit\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12,\n\x06offset\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x17\n\x0forder_ascending\x18\x04 
\x01(\x08\x12\xae\x01\n\x1astart_at_vote_poll_id_info\x18\x05 \x01(\x0b\x32\x84\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesRequest.GetContestedResourceIdentityVotesRequestV0.StartAtVotePollIdInfoH\x00\x88\x01\x01\x12\r\n\x05prove\x18\x06 \x01(\x08\x1a\x61\n\x15StartAtVotePollIdInfo\x12 \n\x18start_at_poll_identifier\x18\x01 \x01(\x0c\x12&\n\x1estart_poll_identifier_included\x18\x02 \x01(\x08\x42\x1d\n\x1b_start_at_vote_poll_id_infoB\t\n\x07version\"\xc8\n\n)GetContestedResourceIdentityVotesResponse\x12~\n\x02v0\x18\x01 \x01(\x0b\x32p.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0H\x00\x1a\x8f\t\n+GetContestedResourceIdentityVotesResponseV0\x12\xa1\x01\n\x05votes\x18\x01 \x01(\x0b\x32\x8f\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ContestedResourceIdentityVotesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\xf7\x01\n\x1e\x43ontestedResourceIdentityVotes\x12\xba\x01\n!contested_resource_identity_votes\x18\x01 \x03(\x0b\x32\x8e\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ContestedResourceIdentityVote\x12\x18\n\x10\x66inished_results\x18\x02 \x01(\x08\x1a\xad\x02\n\x12ResourceVoteChoice\x12\xad\x01\n\x10vote_choice_type\x18\x01 \x01(\x0e\x32\x92\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ResourceVoteChoice.VoteChoiceType\x12\x18\n\x0bidentity_id\x18\x02 \x01(\x0cH\x00\x88\x01\x01\"=\n\x0eVoteChoiceType\x12\x14\n\x10TOWARDS_IDENTITY\x10\x00\x12\x0b\n\x07\x41\x42STAIN\x10\x01\x12\x08\n\x04LOCK\x10\x02\x42\x0e\n\x0c_identity_id\x1a\x95\x02\n\x1d\x43ontestedResourceIdentityVote\x12\x13\n\x0b\x63ontract_id\x18\x01 
\x01(\x0c\x12\x1a\n\x12\x64ocument_type_name\x18\x02 \x01(\t\x12\'\n\x1fserialized_index_storage_values\x18\x03 \x03(\x0c\x12\x99\x01\n\x0bvote_choice\x18\x04 \x01(\x0b\x32\x83\x01.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse.GetContestedResourceIdentityVotesResponseV0.ResourceVoteChoiceB\x08\n\x06resultB\t\n\x07version\"\xf0\x01\n%GetPrefundedSpecializedBalanceRequest\x12v\n\x02v0\x18\x01 \x01(\x0b\x32h.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceRequest.GetPrefundedSpecializedBalanceRequestV0H\x00\x1a\x44\n\'GetPrefundedSpecializedBalanceRequestV0\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xed\x02\n&GetPrefundedSpecializedBalanceResponse\x12x\n\x02v0\x18\x01 \x01(\x0b\x32j.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceResponse.GetPrefundedSpecializedBalanceResponseV0H\x00\x1a\xbd\x01\n(GetPrefundedSpecializedBalanceResponseV0\x12\x15\n\x07\x62\x61lance\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xd0\x01\n GetTotalCreditsInPlatformRequest\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformRequest.GetTotalCreditsInPlatformRequestV0H\x00\x1a\x33\n\"GetTotalCreditsInPlatformRequestV0\x12\r\n\x05prove\x18\x01 \x01(\x08\x42\t\n\x07version\"\xd9\x02\n!GetTotalCreditsInPlatformResponse\x12n\n\x02v0\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformResponse.GetTotalCreditsInPlatformResponseV0H\x00\x1a\xb8\x01\n#GetTotalCreditsInPlatformResponseV0\x12\x15\n\x07\x63redits\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 
\x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x08\n\x06resultB\t\n\x07version\"\xc4\x01\n\x16GetPathElementsRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetPathElementsRequest.GetPathElementsRequestV0H\x00\x1a\x45\n\x18GetPathElementsRequestV0\x12\x0c\n\x04path\x18\x01 \x03(\x0c\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xa3\x03\n\x17GetPathElementsResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetPathElementsResponse.GetPathElementsResponseV0H\x00\x1a\xa0\x02\n\x19GetPathElementsResponseV0\x12i\n\x08\x65lements\x18\x01 \x01(\x0b\x32U.org.dash.platform.dapi.v0.GetPathElementsResponse.GetPathElementsResponseV0.ElementsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x1c\n\x08\x45lements\x12\x10\n\x08\x65lements\x18\x01 \x03(\x0c\x42\x08\n\x06resultB\t\n\x07version\"\x81\x01\n\x10GetStatusRequest\x12L\n\x02v0\x18\x01 \x01(\x0b\x32>.org.dash.platform.dapi.v0.GetStatusRequest.GetStatusRequestV0H\x00\x1a\x14\n\x12GetStatusRequestV0B\t\n\x07version\"\xd0\x10\n\x11GetStatusResponse\x12N\n\x02v0\x18\x01 \x01(\x0b\x32@.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0H\x00\x1a\xdf\x0f\n\x13GetStatusResponseV0\x12Y\n\x07version\x18\x01 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version\x12S\n\x04node\x18\x02 \x01(\x0b\x32\x45.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Node\x12U\n\x05\x63hain\x18\x03 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Chain\x12Y\n\x07network\x18\x04 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Network\x12^\n\nstate_sync\x18\x05 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.StateSync\x12S\n\x04time\x18\x06 
\x01(\x0b\x32\x45.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Time\x1a\xee\x04\n\x07Version\x12\x63\n\x08software\x18\x01 \x01(\x0b\x32Q.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Software\x12\x63\n\x08protocol\x18\x02 \x01(\x0b\x32Q.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Protocol\x1a^\n\x08Software\x12\x0c\n\x04\x64\x61pi\x18\x01 \x01(\t\x12\x12\n\x05\x64rive\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x17\n\ntenderdash\x18\x03 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_driveB\r\n\x0b_tenderdash\x1a\xb8\x02\n\x08Protocol\x12p\n\ntenderdash\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Protocol.Tenderdash\x12\x66\n\x05\x64rive\x18\x02 \x01(\x0b\x32W.org.dash.platform.dapi.v0.GetStatusResponse.GetStatusResponseV0.Version.Protocol.Drive\x1a(\n\nTenderdash\x12\x0b\n\x03p2p\x18\x01 \x01(\r\x12\r\n\x05\x62lock\x18\x02 \x01(\r\x1a(\n\x05\x44rive\x12\x0e\n\x06latest\x18\x03 \x01(\r\x12\x0f\n\x07\x63urrent\x18\x04 \x01(\r\x1a\x7f\n\x04Time\x12\x11\n\x05local\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x16\n\x05\x62lock\x18\x02 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01\x12\x18\n\x07genesis\x18\x03 \x01(\x04\x42\x02\x30\x01H\x01\x88\x01\x01\x12\x12\n\x05\x65poch\x18\x04 \x01(\rH\x02\x88\x01\x01\x42\x08\n\x06_blockB\n\n\x08_genesisB\x08\n\x06_epoch\x1a<\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\x18\n\x0bpro_tx_hash\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x42\x0e\n\x0c_pro_tx_hash\x1a\xb3\x02\n\x05\x43hain\x12\x13\n\x0b\x63\x61tching_up\x18\x01 \x01(\x08\x12\x19\n\x11latest_block_hash\x18\x02 \x01(\x0c\x12\x17\n\x0flatest_app_hash\x18\x03 \x01(\x0c\x12\x1f\n\x13latest_block_height\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x1b\n\x13\x65\x61rliest_block_hash\x18\x05 \x01(\x0c\x12\x19\n\x11\x65\x61rliest_app_hash\x18\x06 \x01(\x0c\x12!\n\x15\x65\x61rliest_block_height\x18\x07 \x01(\x04\x42\x02\x30\x01\x12!\n\x15max_peer_block_height\x18\t 
\x01(\x04\x42\x02\x30\x01\x12%\n\x18\x63ore_chain_locked_height\x18\n \x01(\rH\x00\x88\x01\x01\x42\x1b\n\x19_core_chain_locked_height\x1a\x43\n\x07Network\x12\x10\n\x08\x63hain_id\x18\x01 \x01(\t\x12\x13\n\x0bpeers_count\x18\x02 \x01(\r\x12\x11\n\tlistening\x18\x03 \x01(\x08\x1a\x85\x02\n\tStateSync\x12\x1d\n\x11total_synced_time\x18\x01 \x01(\x04\x42\x02\x30\x01\x12\x1a\n\x0eremaining_time\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x17\n\x0ftotal_snapshots\x18\x03 \x01(\r\x12\"\n\x16\x63hunk_process_avg_time\x18\x04 \x01(\x04\x42\x02\x30\x01\x12\x1b\n\x0fsnapshot_height\x18\x05 \x01(\x04\x42\x02\x30\x01\x12!\n\x15snapshot_chunks_count\x18\x06 \x01(\x04\x42\x02\x30\x01\x12\x1d\n\x11\x62\x61\x63kfilled_blocks\x18\x07 \x01(\x04\x42\x02\x30\x01\x12!\n\x15\x62\x61\x63kfill_blocks_total\x18\x08 \x01(\x04\x42\x02\x30\x01\x42\t\n\x07version\"\xb1\x01\n\x1cGetCurrentQuorumsInfoRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoRequest.GetCurrentQuorumsInfoRequestV0H\x00\x1a \n\x1eGetCurrentQuorumsInfoRequestV0B\t\n\x07version\"\xa1\x05\n\x1dGetCurrentQuorumsInfoResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse.GetCurrentQuorumsInfoResponseV0H\x00\x1a\x46\n\x0bValidatorV0\x12\x13\n\x0bpro_tx_hash\x18\x01 \x01(\x0c\x12\x0f\n\x07node_ip\x18\x02 \x01(\t\x12\x11\n\tis_banned\x18\x03 \x01(\x08\x1a\xaf\x01\n\x0eValidatorSetV0\x12\x13\n\x0bquorum_hash\x18\x01 \x01(\x0c\x12\x13\n\x0b\x63ore_height\x18\x02 \x01(\r\x12U\n\x07members\x18\x03 \x03(\x0b\x32\x44.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse.ValidatorV0\x12\x1c\n\x14threshold_public_key\x18\x04 \x01(\x0c\x1a\x92\x02\n\x1fGetCurrentQuorumsInfoResponseV0\x12\x15\n\rquorum_hashes\x18\x01 \x03(\x0c\x12\x1b\n\x13\x63urrent_quorum_hash\x18\x02 \x01(\x0c\x12_\n\x0evalidator_sets\x18\x03 \x03(\x0b\x32G.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse.ValidatorSetV0\x12\x1b\n\x13last_block_proposer\x18\x04 
\x01(\x0c\x12=\n\x08metadata\x18\x05 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\t\n\x07version\"\xf4\x01\n\x1fGetIdentityTokenBalancesRequest\x12j\n\x02v0\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetIdentityTokenBalancesRequest.GetIdentityTokenBalancesRequestV0H\x00\x1aZ\n!GetIdentityTokenBalancesRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xad\x05\n GetIdentityTokenBalancesResponse\x12l\n\x02v0\x18\x01 \x01(\x0b\x32^.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse.GetIdentityTokenBalancesResponseV0H\x00\x1a\x8f\x04\n\"GetIdentityTokenBalancesResponseV0\x12\x86\x01\n\x0etoken_balances\x18\x01 \x01(\x0b\x32l.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse.GetIdentityTokenBalancesResponseV0.TokenBalancesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aG\n\x11TokenBalanceEntry\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x14\n\x07\x62\x61lance\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_balance\x1a\x9a\x01\n\rTokenBalances\x12\x88\x01\n\x0etoken_balances\x18\x01 \x03(\x0b\x32p.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse.GetIdentityTokenBalancesResponseV0.TokenBalanceEntryB\x08\n\x06resultB\t\n\x07version\"\xfc\x01\n!GetIdentitiesTokenBalancesRequest\x12n\n\x02v0\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesRequest.GetIdentitiesTokenBalancesRequestV0H\x00\x1a\\\n#GetIdentitiesTokenBalancesRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x14\n\x0cidentity_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xf2\x05\n\"GetIdentitiesTokenBalancesResponse\x12p\n\x02v0\x18\x01 
\x01(\x0b\x32\x62.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse.GetIdentitiesTokenBalancesResponseV0H\x00\x1a\xce\x04\n$GetIdentitiesTokenBalancesResponseV0\x12\x9b\x01\n\x17identity_token_balances\x18\x01 \x01(\x0b\x32x.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse.GetIdentitiesTokenBalancesResponseV0.IdentityTokenBalancesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1aR\n\x19IdentityTokenBalanceEntry\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x14\n\x07\x62\x61lance\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_balance\x1a\xb7\x01\n\x15IdentityTokenBalances\x12\x9d\x01\n\x17identity_token_balances\x18\x01 \x03(\x0b\x32|.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse.GetIdentitiesTokenBalancesResponseV0.IdentityTokenBalanceEntryB\x08\n\x06resultB\t\n\x07version\"\xe8\x01\n\x1cGetIdentityTokenInfosRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetIdentityTokenInfosRequest.GetIdentityTokenInfosRequestV0H\x00\x1aW\n\x1eGetIdentityTokenInfosRequestV0\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x11\n\ttoken_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\x98\x06\n\x1dGetIdentityTokenInfosResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0H\x00\x1a\x83\x05\n\x1fGetIdentityTokenInfosResponseV0\x12z\n\x0btoken_infos\x18\x01 \x01(\x0b\x32\x63.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0.TokenInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a(\n\x16TokenIdentityInfoEntry\x12\x0e\n\x06\x66rozen\x18\x01 \x01(\x08\x1a\xb0\x01\n\x0eTokenInfoEntry\x12\x10\n\x08token_id\x18\x01 
\x01(\x0c\x12\x82\x01\n\x04info\x18\x02 \x01(\x0b\x32o.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0.TokenIdentityInfoEntryH\x00\x88\x01\x01\x42\x07\n\x05_info\x1a\x8a\x01\n\nTokenInfos\x12|\n\x0btoken_infos\x18\x01 \x03(\x0b\x32g.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse.GetIdentityTokenInfosResponseV0.TokenInfoEntryB\x08\n\x06resultB\t\n\x07version\"\xf0\x01\n\x1eGetIdentitiesTokenInfosRequest\x12h\n\x02v0\x18\x01 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosRequest.GetIdentitiesTokenInfosRequestV0H\x00\x1aY\n GetIdentitiesTokenInfosRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x14\n\x0cidentity_ids\x18\x02 \x03(\x0c\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xca\x06\n\x1fGetIdentitiesTokenInfosResponse\x12j\n\x02v0\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0H\x00\x1a\xaf\x05\n!GetIdentitiesTokenInfosResponseV0\x12\x8f\x01\n\x14identity_token_infos\x18\x01 \x01(\x0b\x32o.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0.IdentityTokenInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a(\n\x16TokenIdentityInfoEntry\x12\x0e\n\x06\x66rozen\x18\x01 \x01(\x08\x1a\xb7\x01\n\x0eTokenInfoEntry\x12\x13\n\x0bidentity_id\x18\x01 \x01(\x0c\x12\x86\x01\n\x04info\x18\x02 \x01(\x0b\x32s.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0.TokenIdentityInfoEntryH\x00\x88\x01\x01\x42\x07\n\x05_info\x1a\x97\x01\n\x12IdentityTokenInfos\x12\x80\x01\n\x0btoken_infos\x18\x01 \x03(\x0b\x32k.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse.GetIdentitiesTokenInfosResponseV0.TokenInfoEntryB\x08\n\x06resultB\t\n\x07version\"\xbf\x01\n\x17GetTokenStatusesRequest\x12Z\n\x02v0\x18\x01 
\x01(\x0b\x32L.org.dash.platform.dapi.v0.GetTokenStatusesRequest.GetTokenStatusesRequestV0H\x00\x1a=\n\x19GetTokenStatusesRequestV0\x12\x11\n\ttoken_ids\x18\x01 \x03(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xe7\x04\n\x18GetTokenStatusesResponse\x12\\\n\x02v0\x18\x01 \x01(\x0b\x32N.org.dash.platform.dapi.v0.GetTokenStatusesResponse.GetTokenStatusesResponseV0H\x00\x1a\xe1\x03\n\x1aGetTokenStatusesResponseV0\x12v\n\x0etoken_statuses\x18\x01 \x01(\x0b\x32\\.org.dash.platform.dapi.v0.GetTokenStatusesResponse.GetTokenStatusesResponseV0.TokenStatusesH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x44\n\x10TokenStatusEntry\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x13\n\x06paused\x18\x02 \x01(\x08H\x00\x88\x01\x01\x42\t\n\x07_paused\x1a\x88\x01\n\rTokenStatuses\x12w\n\x0etoken_statuses\x18\x01 \x03(\x0b\x32_.org.dash.platform.dapi.v0.GetTokenStatusesResponse.GetTokenStatusesResponseV0.TokenStatusEntryB\x08\n\x06resultB\t\n\x07version\"\xef\x04\n)GetTokenPreProgrammedDistributionsRequest\x12~\n\x02v0\x18\x01 \x01(\x0b\x32p.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsRequest.GetTokenPreProgrammedDistributionsRequestV0H\x00\x1a\xb6\x03\n+GetTokenPreProgrammedDistributionsRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x98\x01\n\rstart_at_info\x18\x02 \x01(\x0b\x32|.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsRequest.GetTokenPreProgrammedDistributionsRequestV0.StartAtInfoH\x00\x88\x01\x01\x12\x12\n\x05limit\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\x04 \x01(\x08\x1a\x9a\x01\n\x0bStartAtInfo\x12\x15\n\rstart_time_ms\x18\x01 \x01(\x04\x12\x1c\n\x0fstart_recipient\x18\x02 \x01(\x0cH\x00\x88\x01\x01\x12%\n\x18start_recipient_included\x18\x03 
\x01(\x08H\x01\x88\x01\x01\x42\x12\n\x10_start_recipientB\x1b\n\x19_start_recipient_includedB\x10\n\x0e_start_at_infoB\x08\n\x06_limitB\t\n\x07version\"\xec\x07\n*GetTokenPreProgrammedDistributionsResponse\x12\x80\x01\n\x02v0\x18\x01 \x01(\x0b\x32r.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0H\x00\x1a\xaf\x06\n,GetTokenPreProgrammedDistributionsResponseV0\x12\xa5\x01\n\x13token_distributions\x18\x01 \x01(\x0b\x32\x85\x01.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0.TokenDistributionsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a>\n\x16TokenDistributionEntry\x12\x14\n\x0crecipient_id\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x04\x1a\xd4\x01\n\x1bTokenTimedDistributionEntry\x12\x11\n\ttimestamp\x18\x01 \x01(\x04\x12\xa1\x01\n\rdistributions\x18\x02 \x03(\x0b\x32\x89\x01.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0.TokenDistributionEntry\x1a\xc3\x01\n\x12TokenDistributions\x12\xac\x01\n\x13token_distributions\x18\x01 \x03(\x0b\x32\x8e\x01.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse.GetTokenPreProgrammedDistributionsResponseV0.TokenTimedDistributionEntryB\x08\n\x06resultB\t\n\x07version\"\xca\x01\n\x1aGetTokenTotalSupplyRequest\x12`\n\x02v0\x18\x01 \x01(\x0b\x32R.org.dash.platform.dapi.v0.GetTokenTotalSupplyRequest.GetTokenTotalSupplyRequestV0H\x00\x1a?\n\x1cGetTokenTotalSupplyRequestV0\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\x42\t\n\x07version\"\xaf\x04\n\x1bGetTokenTotalSupplyResponse\x12\x62\n\x02v0\x18\x01 
\x01(\x0b\x32T.org.dash.platform.dapi.v0.GetTokenTotalSupplyResponse.GetTokenTotalSupplyResponseV0H\x00\x1a\xa0\x03\n\x1dGetTokenTotalSupplyResponseV0\x12\x88\x01\n\x12token_total_supply\x18\x01 \x01(\x0b\x32j.org.dash.platform.dapi.v0.GetTokenTotalSupplyResponse.GetTokenTotalSupplyResponseV0.TokenTotalSupplyEntryH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1ax\n\x15TokenTotalSupplyEntry\x12\x10\n\x08token_id\x18\x01 \x01(\x0c\x12\x30\n(total_aggregated_amount_in_user_accounts\x18\x02 \x01(\x04\x12\x1b\n\x13total_system_amount\x18\x03 \x01(\x04\x42\x08\n\x06resultB\t\n\x07version\"\xd2\x01\n\x13GetGroupInfoRequest\x12R\n\x02v0\x18\x01 \x01(\x0b\x32\x44.org.dash.platform.dapi.v0.GetGroupInfoRequest.GetGroupInfoRequestV0H\x00\x1a\\\n\x15GetGroupInfoRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1f\n\x17group_contract_position\x18\x02 \x01(\r\x12\r\n\x05prove\x18\x03 \x01(\x08\x42\t\n\x07version\"\xd4\x05\n\x14GetGroupInfoResponse\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0H\x00\x1a\xda\x04\n\x16GetGroupInfoResponseV0\x12\x66\n\ngroup_info\x18\x01 \x01(\x0b\x32P.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0.GroupInfoH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x04 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x34\n\x10GroupMemberEntry\x12\x11\n\tmember_id\x18\x01 \x01(\x0c\x12\r\n\x05power\x18\x02 \x01(\r\x1a\x98\x01\n\x0eGroupInfoEntry\x12h\n\x07members\x18\x01 \x03(\x0b\x32W.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0.GroupMemberEntry\x12\x1c\n\x14group_required_power\x18\x02 \x01(\r\x1a\x8a\x01\n\tGroupInfo\x12n\n\ngroup_info\x18\x01 
\x01(\x0b\x32U.org.dash.platform.dapi.v0.GetGroupInfoResponse.GetGroupInfoResponseV0.GroupInfoEntryH\x00\x88\x01\x01\x42\r\n\x0b_group_infoB\x08\n\x06resultB\t\n\x07version\"\xed\x03\n\x14GetGroupInfosRequest\x12T\n\x02v0\x18\x01 \x01(\x0b\x32\x46.org.dash.platform.dapi.v0.GetGroupInfosRequest.GetGroupInfosRequestV0H\x00\x1au\n\x1cStartAtGroupContractPosition\x12%\n\x1dstart_group_contract_position\x18\x01 \x01(\r\x12.\n&start_group_contract_position_included\x18\x02 \x01(\x08\x1a\xfc\x01\n\x16GetGroupInfosRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12{\n start_at_group_contract_position\x18\x02 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetGroupInfosRequest.StartAtGroupContractPositionH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\x04 \x01(\x08\x42#\n!_start_at_group_contract_positionB\x08\n\x06_countB\t\n\x07version\"\xff\x05\n\x15GetGroupInfosResponse\x12V\n\x02v0\x18\x01 \x01(\x0b\x32H.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0H\x00\x1a\x82\x05\n\x17GetGroupInfosResponseV0\x12j\n\x0bgroup_infos\x18\x01 \x01(\x0b\x32S.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0.GroupInfosH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x04 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x34\n\x10GroupMemberEntry\x12\x11\n\tmember_id\x18\x01 \x01(\x0c\x12\r\n\x05power\x18\x02 \x01(\r\x1a\xc3\x01\n\x16GroupPositionInfoEntry\x12\x1f\n\x17group_contract_position\x18\x01 \x01(\r\x12j\n\x07members\x18\x02 \x03(\x0b\x32Y.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0.GroupMemberEntry\x12\x1c\n\x14group_required_power\x18\x03 \x01(\r\x1a\x82\x01\n\nGroupInfos\x12t\n\x0bgroup_infos\x18\x01 
\x03(\x0b\x32_.org.dash.platform.dapi.v0.GetGroupInfosResponse.GetGroupInfosResponseV0.GroupPositionInfoEntryB\x08\n\x06resultB\t\n\x07version\"\xbe\x04\n\x16GetGroupActionsRequest\x12X\n\x02v0\x18\x01 \x01(\x0b\x32J.org.dash.platform.dapi.v0.GetGroupActionsRequest.GetGroupActionsRequestV0H\x00\x1aL\n\x0fStartAtActionId\x12\x17\n\x0fstart_action_id\x18\x01 \x01(\x0c\x12 \n\x18start_action_id_included\x18\x02 \x01(\x08\x1a\xc8\x02\n\x18GetGroupActionsRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1f\n\x17group_contract_position\x18\x02 \x01(\r\x12N\n\x06status\x18\x03 \x01(\x0e\x32>.org.dash.platform.dapi.v0.GetGroupActionsRequest.ActionStatus\x12\x62\n\x12start_at_action_id\x18\x04 \x01(\x0b\x32\x41.org.dash.platform.dapi.v0.GetGroupActionsRequest.StartAtActionIdH\x00\x88\x01\x01\x12\x12\n\x05\x63ount\x18\x05 \x01(\rH\x01\x88\x01\x01\x12\r\n\x05prove\x18\x06 \x01(\x08\x42\x15\n\x13_start_at_action_idB\x08\n\x06_count\"&\n\x0c\x41\x63tionStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\n\n\x06\x43LOSED\x10\x01\x42\t\n\x07version\"\xd1\x19\n\x17GetGroupActionsResponse\x12Z\n\x02v0\x18\x01 \x01(\x0b\x32L.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0H\x00\x1a\xce\x18\n\x19GetGroupActionsResponseV0\x12r\n\rgroup_actions\x18\x01 \x01(\x0b\x32Y.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionsH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a[\n\tMintEvent\x12\x0e\n\x06\x61mount\x18\x01 \x01(\x04\x12\x14\n\x0crecipient_id\x18\x02 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\x45\n\tBurnEvent\x12\x0e\n\x06\x61mount\x18\x01 \x01(\x04\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1aJ\n\x0b\x46reezeEvent\x12\x11\n\tfrozen_id\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 
\x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1aL\n\rUnfreezeEvent\x12\x11\n\tfrozen_id\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\x66\n\x17\x44\x65stroyFrozenFundsEvent\x12\x11\n\tfrozen_id\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x04\x12\x18\n\x0bpublic_note\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\x64\n\x13SharedEncryptedNote\x12\x18\n\x10sender_key_index\x18\x01 \x01(\r\x12\x1b\n\x13recipient_key_index\x18\x02 \x01(\r\x12\x16\n\x0e\x65ncrypted_data\x18\x03 \x01(\x0c\x1a{\n\x15PersonalEncryptedNote\x12!\n\x19root_encryption_key_index\x18\x01 \x01(\r\x12\'\n\x1f\x64\x65rivation_encryption_key_index\x18\x02 \x01(\r\x12\x16\n\x0e\x65ncrypted_data\x18\x03 \x01(\x0c\x1a\xe9\x01\n\x14\x45mergencyActionEvent\x12\x81\x01\n\x0b\x61\x63tion_type\x18\x01 \x01(\x0e\x32l.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.ActionType\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\"#\n\nActionType\x12\t\n\x05PAUSE\x10\x00\x12\n\n\x06RESUME\x10\x01\x42\x0e\n\x0c_public_note\x1a\x64\n\x16TokenConfigUpdateEvent\x12 \n\x18token_config_update_item\x18\x01 \x01(\x0c\x12\x18\n\x0bpublic_note\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_public_note\x1a\xfc\x02\n\x10GroupActionEvent\x12n\n\x0btoken_event\x18\x01 \x01(\x0b\x32W.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEventH\x00\x12t\n\x0e\x64ocument_event\x18\x02 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DocumentEventH\x00\x12t\n\x0e\x63ontract_event\x18\x03 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ContractEventH\x00\x42\x0c\n\nevent_type\x1a\x8b\x01\n\rDocumentEvent\x12r\n\x06\x63reate\x18\x01 
\x01(\x0b\x32`.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DocumentCreateEventH\x00\x42\x06\n\x04type\x1a/\n\x13\x44ocumentCreateEvent\x12\x18\n\x10\x63reated_document\x18\x01 \x01(\x0c\x1a/\n\x13\x43ontractUpdateEvent\x12\x18\n\x10updated_contract\x18\x01 \x01(\x0c\x1a\x8b\x01\n\rContractEvent\x12r\n\x06update\x18\x01 \x01(\x0b\x32`.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.ContractUpdateEventH\x00\x42\x06\n\x04type\x1a\xcb\x06\n\nTokenEvent\x12\x66\n\x04mint\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.MintEventH\x00\x12\x66\n\x04\x62urn\x18\x02 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.BurnEventH\x00\x12j\n\x06\x66reeze\x18\x03 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.FreezeEventH\x00\x12n\n\x08unfreeze\x18\x04 \x01(\x0b\x32Z.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEventH\x00\x12\x84\x01\n\x14\x64\x65stroy_frozen_funds\x18\x05 \x01(\x0b\x32\x64.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEventH\x00\x12}\n\x10\x65mergency_action\x18\x06 \x01(\x0b\x32\x61.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEventH\x00\x12\x82\x01\n\x13token_config_update\x18\x07 \x01(\x0b\x32\x63.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEventH\x00\x42\x06\n\x04type\x1a\x93\x01\n\x10GroupActionEntry\x12\x11\n\taction_id\x18\x01 \x01(\x0c\x12l\n\x05\x65vent\x18\x02 \x01(\x0b\x32].org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionEvent\x1a\x84\x01\n\x0cGroupActions\x12t\n\rgroup_actions\x18\x01 
\x03(\x0b\x32].org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionEntryB\x08\n\x06resultB\t\n\x07version\"\x88\x03\n\x1cGetGroupActionSignersRequest\x12\x64\n\x02v0\x18\x01 \x01(\x0b\x32V.org.dash.platform.dapi.v0.GetGroupActionSignersRequest.GetGroupActionSignersRequestV0H\x00\x1a\xce\x01\n\x1eGetGroupActionSignersRequestV0\x12\x13\n\x0b\x63ontract_id\x18\x01 \x01(\x0c\x12\x1f\n\x17group_contract_position\x18\x02 \x01(\r\x12T\n\x06status\x18\x03 \x01(\x0e\x32\x44.org.dash.platform.dapi.v0.GetGroupActionSignersRequest.ActionStatus\x12\x11\n\taction_id\x18\x04 \x01(\x0c\x12\r\n\x05prove\x18\x05 \x01(\x08\"&\n\x0c\x41\x63tionStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\n\n\x06\x43LOSED\x10\x01\x42\t\n\x07version\"\x8b\x05\n\x1dGetGroupActionSignersResponse\x12\x66\n\x02v0\x18\x01 \x01(\x0b\x32X.org.dash.platform.dapi.v0.GetGroupActionSignersResponse.GetGroupActionSignersResponseV0H\x00\x1a\xf6\x03\n\x1fGetGroupActionSignersResponseV0\x12\x8b\x01\n\x14group_action_signers\x18\x01 \x01(\x0b\x32k.org.dash.platform.dapi.v0.GetGroupActionSignersResponse.GetGroupActionSignersResponseV0.GroupActionSignersH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\x1a\x35\n\x11GroupActionSigner\x12\x11\n\tsigner_id\x18\x01 \x01(\x0c\x12\r\n\x05power\x18\x02 \x01(\r\x1a\x91\x01\n\x12GroupActionSigners\x12{\n\x07signers\x18\x01 
\x03(\x0b\x32j.org.dash.platform.dapi.v0.GetGroupActionSignersResponse.GetGroupActionSignersResponseV0.GroupActionSignerB\x08\n\x06resultB\t\n\x07version*Z\n\nKeyPurpose\x12\x12\n\x0e\x41UTHENTICATION\x10\x00\x12\x0e\n\nENCRYPTION\x10\x01\x12\x0e\n\nDECRYPTION\x10\x02\x12\x0c\n\x08TRANSFER\x10\x03\x12\n\n\x06VOTING\x10\x05\x32\x9a\x30\n\x08Platform\x12\x93\x01\n\x18\x62roadcastStateTransition\x12:.org.dash.platform.dapi.v0.BroadcastStateTransitionRequest\x1a;.org.dash.platform.dapi.v0.BroadcastStateTransitionResponse\x12l\n\x0bgetIdentity\x12-.org.dash.platform.dapi.v0.GetIdentityRequest\x1a..org.dash.platform.dapi.v0.GetIdentityResponse\x12x\n\x0fgetIdentityKeys\x12\x31.org.dash.platform.dapi.v0.GetIdentityKeysRequest\x1a\x32.org.dash.platform.dapi.v0.GetIdentityKeysResponse\x12\x96\x01\n\x19getIdentitiesContractKeys\x12;.org.dash.platform.dapi.v0.GetIdentitiesContractKeysRequest\x1a<.org.dash.platform.dapi.v0.GetIdentitiesContractKeysResponse\x12{\n\x10getIdentityNonce\x12\x32.org.dash.platform.dapi.v0.GetIdentityNonceRequest\x1a\x33.org.dash.platform.dapi.v0.GetIdentityNonceResponse\x12\x93\x01\n\x18getIdentityContractNonce\x12:.org.dash.platform.dapi.v0.GetIdentityContractNonceRequest\x1a;.org.dash.platform.dapi.v0.GetIdentityContractNonceResponse\x12\x81\x01\n\x12getIdentityBalance\x12\x34.org.dash.platform.dapi.v0.GetIdentityBalanceRequest\x1a\x35.org.dash.platform.dapi.v0.GetIdentityBalanceResponse\x12\x8a\x01\n\x15getIdentitiesBalances\x12\x37.org.dash.platform.dapi.v0.GetIdentitiesBalancesRequest\x1a\x38.org.dash.platform.dapi.v0.GetIdentitiesBalancesResponse\x12\xa2\x01\n\x1dgetIdentityBalanceAndRevision\x12?.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionRequest\x1a@.org.dash.platform.dapi.v0.GetIdentityBalanceAndRevisionResponse\x12\xaf\x01\n#getEvonodesProposedEpochBlocksByIds\x12\x45.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByIdsRequest\x1a\x41.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse\x12\xb3\x01\n%getEvon
odesProposedEpochBlocksByRange\x12G.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksByRangeRequest\x1a\x41.org.dash.platform.dapi.v0.GetEvonodesProposedEpochBlocksResponse\x12\x66\n\tgetProofs\x12+.org.dash.platform.dapi.v0.GetProofsRequest\x1a,.org.dash.platform.dapi.v0.GetProofsResponse\x12x\n\x0fgetDataContract\x12\x31.org.dash.platform.dapi.v0.GetDataContractRequest\x1a\x32.org.dash.platform.dapi.v0.GetDataContractResponse\x12\x8d\x01\n\x16getDataContractHistory\x12\x38.org.dash.platform.dapi.v0.GetDataContractHistoryRequest\x1a\x39.org.dash.platform.dapi.v0.GetDataContractHistoryResponse\x12{\n\x10getDataContracts\x12\x32.org.dash.platform.dapi.v0.GetDataContractsRequest\x1a\x33.org.dash.platform.dapi.v0.GetDataContractsResponse\x12o\n\x0cgetDocuments\x12..org.dash.platform.dapi.v0.GetDocumentsRequest\x1a/.org.dash.platform.dapi.v0.GetDocumentsResponse\x12\x99\x01\n\x1agetIdentityByPublicKeyHash\x12<.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashRequest\x1a=.org.dash.platform.dapi.v0.GetIdentityByPublicKeyHashResponse\x12\x9f\x01\n\x1cwaitForStateTransitionResult\x12>.org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest\x1a?.org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse\x12\x81\x01\n\x12getConsensusParams\x12\x34.org.dash.platform.dapi.v0.GetConsensusParamsRequest\x1a\x35.org.dash.platform.dapi.v0.GetConsensusParamsResponse\x12\xa5\x01\n\x1egetProtocolVersionUpgradeState\x12@.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateRequest\x1a\x41.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeStateResponse\x12\xb4\x01\n#getProtocolVersionUpgradeVoteStatus\x12\x45.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusRequest\x1a\x46.org.dash.platform.dapi.v0.GetProtocolVersionUpgradeVoteStatusResponse\x12r\n\rgetEpochsInfo\x12/.org.dash.platform.dapi.v0.GetEpochsInfoRequest\x1a\x30.org.dash.platform.dapi.v0.GetEpochsInfoResponse\x12\x8a\x01\n\x15getContestedResources\x12\x37.org.dash.platform.dapi.v0.GetConteste
dResourcesRequest\x1a\x38.org.dash.platform.dapi.v0.GetContestedResourcesResponse\x12\xa2\x01\n\x1dgetContestedResourceVoteState\x12?.org.dash.platform.dapi.v0.GetContestedResourceVoteStateRequest\x1a@.org.dash.platform.dapi.v0.GetContestedResourceVoteStateResponse\x12\xba\x01\n%getContestedResourceVotersForIdentity\x12G.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityRequest\x1aH.org.dash.platform.dapi.v0.GetContestedResourceVotersForIdentityResponse\x12\xae\x01\n!getContestedResourceIdentityVotes\x12\x43.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesRequest\x1a\x44.org.dash.platform.dapi.v0.GetContestedResourceIdentityVotesResponse\x12\x8a\x01\n\x15getVotePollsByEndDate\x12\x37.org.dash.platform.dapi.v0.GetVotePollsByEndDateRequest\x1a\x38.org.dash.platform.dapi.v0.GetVotePollsByEndDateResponse\x12\xa5\x01\n\x1egetPrefundedSpecializedBalance\x12@.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceRequest\x1a\x41.org.dash.platform.dapi.v0.GetPrefundedSpecializedBalanceResponse\x12\x96\x01\n\x19getTotalCreditsInPlatform\x12;.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformRequest\x1a<.org.dash.platform.dapi.v0.GetTotalCreditsInPlatformResponse\x12x\n\x0fgetPathElements\x12\x31.org.dash.platform.dapi.v0.GetPathElementsRequest\x1a\x32.org.dash.platform.dapi.v0.GetPathElementsResponse\x12\x66\n\tgetStatus\x12+.org.dash.platform.dapi.v0.GetStatusRequest\x1a,.org.dash.platform.dapi.v0.GetStatusResponse\x12\x8a\x01\n\x15getCurrentQuorumsInfo\x12\x37.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoRequest\x1a\x38.org.dash.platform.dapi.v0.GetCurrentQuorumsInfoResponse\x12\x93\x01\n\x18getIdentityTokenBalances\x12:.org.dash.platform.dapi.v0.GetIdentityTokenBalancesRequest\x1a;.org.dash.platform.dapi.v0.GetIdentityTokenBalancesResponse\x12\x99\x01\n\x1agetIdentitiesTokenBalances\x12<.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesRequest\x1a=.org.dash.platform.dapi.v0.GetIdentitiesTokenBalancesResponse\x12\x8a\x01\n\x15getIdentityToken
Infos\x12\x37.org.dash.platform.dapi.v0.GetIdentityTokenInfosRequest\x1a\x38.org.dash.platform.dapi.v0.GetIdentityTokenInfosResponse\x12\x90\x01\n\x17getIdentitiesTokenInfos\x12\x39.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosRequest\x1a:.org.dash.platform.dapi.v0.GetIdentitiesTokenInfosResponse\x12{\n\x10getTokenStatuses\x12\x32.org.dash.platform.dapi.v0.GetTokenStatusesRequest\x1a\x33.org.dash.platform.dapi.v0.GetTokenStatusesResponse\x12\xb1\x01\n\"getTokenPreProgrammedDistributions\x12\x44.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsRequest\x1a\x45.org.dash.platform.dapi.v0.GetTokenPreProgrammedDistributionsResponse\x12\x84\x01\n\x13getTokenTotalSupply\x12\x35.org.dash.platform.dapi.v0.GetTokenTotalSupplyRequest\x1a\x36.org.dash.platform.dapi.v0.GetTokenTotalSupplyResponse\x12o\n\x0cgetGroupInfo\x12..org.dash.platform.dapi.v0.GetGroupInfoRequest\x1a/.org.dash.platform.dapi.v0.GetGroupInfoResponse\x12r\n\rgetGroupInfos\x12/.org.dash.platform.dapi.v0.GetGroupInfosRequest\x1a\x30.org.dash.platform.dapi.v0.GetGroupInfosResponse\x12x\n\x0fgetGroupActions\x12\x31.org.dash.platform.dapi.v0.GetGroupActionsRequest\x1a\x32.org.dash.platform.dapi.v0.GetGroupActionsResponse\x12\x8a\x01\n\x15getGroupActionSigners\x12\x37.org.dash.platform.dapi.v0.GetGroupActionSignersRequest\x1a\x38.org.dash.platform.dapi.v0.GetGroupActionSignersResponseb\x06proto3' , dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) @@ -62,8 +62,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=47103, - serialized_end=47193, + serialized_start=46565, + serialized_end=46655, ) _sym_db.RegisterEnumDescriptor(_KEYPURPOSE) @@ -300,8 +300,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=43870, - serialized_end=43905, + serialized_start=43444, + serialized_end=43479, ) 
_sym_db.RegisterEnumDescriptor(_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT_ACTIONTYPE) @@ -11631,80 +11631,6 @@ serialized_end=43259, ) -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT = _descriptor.Descriptor( - name='TransferEvent', - full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='recipient_id', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.recipient_id', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='public_note', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.public_note', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='shared_encrypted_note', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.shared_encrypted_note', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='personal_encrypted_note', 
full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.personal_encrypted_note', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='amount', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.amount', index=4, - number=5, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='_public_note', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent._public_note', - index=0, containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[]), - _descriptor.OneofDescriptor( - name='_shared_encrypted_note', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent._shared_encrypted_note', - index=1, containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[]), - _descriptor.OneofDescriptor( - name='_personal_encrypted_note', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent._personal_encrypted_note', - index=2, containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[]), - ], - serialized_start=43262, - serialized_end=43685, -) - _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT 
= _descriptor.Descriptor( name='EmergencyActionEvent', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent', @@ -11745,8 +11671,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=43688, - serialized_end=43921, + serialized_start=43262, + serialized_end=43495, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENCONFIGUPDATEEVENT = _descriptor.Descriptor( @@ -11788,8 +11714,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=43923, - serialized_end=44023, + serialized_start=43497, + serialized_end=43597, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONEVENT = _descriptor.Descriptor( @@ -11838,8 +11764,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=44026, - serialized_end=44406, + serialized_start=43600, + serialized_end=43980, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DOCUMENTEVENT = _descriptor.Descriptor( @@ -11874,8 +11800,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=44409, - serialized_end=44548, + serialized_start=43983, + serialized_end=44122, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DOCUMENTCREATEEVENT = _descriptor.Descriptor( @@ -11905,8 +11831,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=44550, - serialized_end=44597, + serialized_start=44124, + serialized_end=44171, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_CONTRACTUPDATEEVENT = _descriptor.Descriptor( @@ -11936,8 +11862,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=44599, - serialized_end=44646, + serialized_start=44173, + serialized_end=44220, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_CONTRACTEVENT = _descriptor.Descriptor( @@ -11972,8 +11898,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=44649, - serialized_end=44788, + serialized_start=44223, + serialized_end=44362, ) 
_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT = _descriptor.Descriptor( @@ -12020,26 +11946,19 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='transfer', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.transfer', index=5, + name='emergency_action', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.emergency_action', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='emergency_action', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.emergency_action', index=6, + name='token_config_update', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.token_config_update', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='token_config_update', full_name='org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.token_config_update', index=7, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], @@ -12057,8 +11976,8 @@ 
create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=44791, - serialized_end=45746, + serialized_start=44365, + serialized_end=45208, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONENTRY = _descriptor.Descriptor( @@ -12095,8 +12014,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=45749, - serialized_end=45896, + serialized_start=45211, + serialized_end=45358, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONS = _descriptor.Descriptor( @@ -12126,8 +12045,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=45899, - serialized_end=46031, + serialized_start=45361, + serialized_end=45493, ) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0 = _descriptor.Descriptor( @@ -12162,7 +12081,7 @@ ], extensions=[ ], - nested_types=[_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_MINTEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_BURNEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_FREEZEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_UNFREEZEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DESTROYFROZENFUNDSEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_SHAREDENCRYPTEDNOTE, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_PERSONALENCRYPTEDNOTE, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENCONFIGUPDATEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DOCUMENTEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DOCUMENTCREATEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_CONTRACTUPDATEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_CONTRACTEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONENTRY, 
_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONS, ], + nested_types=[_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_MINTEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_BURNEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_FREEZEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_UNFREEZEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DESTROYFROZENFUNDSEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_SHAREDENCRYPTEDNOTE, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_PERSONALENCRYPTEDNOTE, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENCONFIGUPDATEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DOCUMENTEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DOCUMENTCREATEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_CONTRACTUPDATEEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_CONTRACTEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONENTRY, _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_GROUPACTIONS, ], enum_types=[ ], serialized_options=None, @@ -12177,7 +12096,7 @@ fields=[]), ], serialized_start=42353, - serialized_end=46041, + serialized_end=45503, ) _GETGROUPACTIONSRESPONSE = _descriptor.Descriptor( @@ -12213,7 +12132,7 @@ fields=[]), ], serialized_start=42233, - serialized_end=46052, + serialized_end=45514, ) @@ -12272,8 +12191,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=46190, - serialized_end=46396, + serialized_start=45652, + serialized_end=45858, ) _GETGROUPACTIONSIGNERSREQUEST = _descriptor.Descriptor( @@ -12309,8 +12228,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=46055, - serialized_end=46447, + serialized_start=45517, + serialized_end=45909, ) @@ -12348,8 +12267,8 @@ 
extension_ranges=[], oneofs=[ ], - serialized_start=46879, - serialized_end=46932, + serialized_start=46341, + serialized_end=46394, ) _GETGROUPACTIONSIGNERSRESPONSE_GETGROUPACTIONSIGNERSRESPONSEV0_GROUPACTIONSIGNERS = _descriptor.Descriptor( @@ -12379,8 +12298,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=46935, - serialized_end=47080, + serialized_start=46397, + serialized_end=46542, ) _GETGROUPACTIONSIGNERSRESPONSE_GETGROUPACTIONSIGNERSRESPONSEV0 = _descriptor.Descriptor( @@ -12429,8 +12348,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=46588, - serialized_end=47090, + serialized_start=46050, + serialized_end=46552, ) _GETGROUPACTIONSIGNERSRESPONSE = _descriptor.Descriptor( @@ -12465,8 +12384,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=46450, - serialized_end=47101, + serialized_start=45912, + serialized_end=46563, ) _GETIDENTITYREQUEST_GETIDENTITYREQUESTV0.containing_type = _GETIDENTITYREQUEST @@ -13535,18 +13454,6 @@ _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DESTROYFROZENFUNDSEVENT.fields_by_name['public_note'].containing_oneof = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DESTROYFROZENFUNDSEVENT.oneofs_by_name['_public_note'] _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_SHAREDENCRYPTEDNOTE.containing_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0 _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_PERSONALENCRYPTEDNOTE.containing_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0 -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['shared_encrypted_note'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_SHAREDENCRYPTEDNOTE -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['personal_encrypted_note'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_PERSONALENCRYPTEDNOTE 
-_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.containing_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0 -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.oneofs_by_name['_public_note'].fields.append( - _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['public_note']) -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['public_note'].containing_oneof = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.oneofs_by_name['_public_note'] -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.oneofs_by_name['_shared_encrypted_note'].fields.append( - _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['shared_encrypted_note']) -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['shared_encrypted_note'].containing_oneof = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.oneofs_by_name['_shared_encrypted_note'] -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.oneofs_by_name['_personal_encrypted_note'].fields.append( - _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['personal_encrypted_note']) -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.fields_by_name['personal_encrypted_note'].containing_oneof = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT.oneofs_by_name['_personal_encrypted_note'] _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT.fields_by_name['action_type'].enum_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT_ACTIONTYPE _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT.containing_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0 _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT_ACTIONTYPE.containing_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT @@ 
-13587,7 +13494,6 @@ _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['freeze'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_FREEZEEVENT _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['unfreeze'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_UNFREEZEEVENT _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['destroy_frozen_funds'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_DESTROYFROZENFUNDSEVENT -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['transfer'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['emergency_action'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['token_config_update'].message_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENCONFIGUPDATEEVENT _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.containing_type = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0 @@ -13606,9 +13512,6 @@ _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.oneofs_by_name['type'].fields.append( _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['destroy_frozen_funds']) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['destroy_frozen_funds'].containing_oneof = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.oneofs_by_name['type'] -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.oneofs_by_name['type'].fields.append( - _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['transfer']) -_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['transfer'].containing_oneof = 
_GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.oneofs_by_name['type'] _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.oneofs_by_name['type'].fields.append( _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['emergency_action']) _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.fields_by_name['emergency_action'].containing_oneof = _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TOKENEVENT.oneofs_by_name['type'] @@ -15764,13 +15667,6 @@ }) , - 'TransferEvent' : _reflection.GeneratedProtocolMessageType('TransferEvent', (_message.Message,), { - 'DESCRIPTOR' : _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_TRANSFEREVENT, - '__module__' : 'platform_pb2' - # @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent) - }) - , - 'EmergencyActionEvent' : _reflection.GeneratedProtocolMessageType('EmergencyActionEvent', (_message.Message,), { 'DESCRIPTOR' : _GETGROUPACTIONSRESPONSE_GETGROUPACTIONSRESPONSEV0_EMERGENCYACTIONEVENT, '__module__' : 'platform_pb2' @@ -15858,7 +15754,6 @@ _sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent) _sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote) _sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote) -_sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent) _sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent) _sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent) _sym_db.RegisterMessage(GetGroupActionsResponse.GetGroupActionsResponseV0.GroupActionEvent) @@ -15960,8 +15855,8 @@ index=0, serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_start=47196, - serialized_end=53366, + serialized_start=46658, + serialized_end=52828, 
methods=[ _descriptor.MethodDescriptor( name='broadcastStateTransition', diff --git a/packages/dapi-grpc/clients/platform/v0/web/platform_pb.d.ts b/packages/dapi-grpc/clients/platform/v0/web/platform_pb.d.ts index 623e82aeb11..cccf8127174 100644 --- a/packages/dapi-grpc/clients/platform/v0/web/platform_pb.d.ts +++ b/packages/dapi-grpc/clients/platform/v0/web/platform_pb.d.ts @@ -8242,50 +8242,6 @@ export namespace GetGroupActionsResponse { } } - export class TransferEvent extends jspb.Message { - getRecipientId(): Uint8Array | string; - getRecipientId_asU8(): Uint8Array; - getRecipientId_asB64(): string; - setRecipientId(value: Uint8Array | string): void; - - hasPublicNote(): boolean; - clearPublicNote(): void; - getPublicNote(): string; - setPublicNote(value: string): void; - - hasSharedEncryptedNote(): boolean; - clearSharedEncryptedNote(): void; - getSharedEncryptedNote(): GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote | undefined; - setSharedEncryptedNote(value?: GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote): void; - - hasPersonalEncryptedNote(): boolean; - clearPersonalEncryptedNote(): void; - getPersonalEncryptedNote(): GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote | undefined; - setPersonalEncryptedNote(value?: GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote): void; - - getAmount(): number; - setAmount(value: number): void; - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): TransferEvent.AsObject; - static toObject(includeInstance: boolean, msg: TransferEvent): TransferEvent.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: TransferEvent, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): TransferEvent; - static deserializeBinaryFromReader(message: TransferEvent, reader: 
jspb.BinaryReader): TransferEvent; - } - - export namespace TransferEvent { - export type AsObject = { - recipientId: Uint8Array | string, - publicNote: string, - sharedEncryptedNote?: GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.AsObject, - personalEncryptedNote?: GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.AsObject, - amount: number, - } - } - export class EmergencyActionEvent extends jspb.Message { getActionType(): GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.ActionTypeMap[keyof GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.ActionTypeMap]; setActionType(value: GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.ActionTypeMap[keyof GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.ActionTypeMap]): void; @@ -8515,11 +8471,6 @@ export namespace GetGroupActionsResponse { getDestroyFrozenFunds(): GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent | undefined; setDestroyFrozenFunds(value?: GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent): void; - hasTransfer(): boolean; - clearTransfer(): void; - getTransfer(): GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent | undefined; - setTransfer(value?: GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent): void; - hasEmergencyAction(): boolean; clearEmergencyAction(): void; getEmergencyAction(): GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent | undefined; @@ -8548,7 +8499,6 @@ export namespace GetGroupActionsResponse { freeze?: GetGroupActionsResponse.GetGroupActionsResponseV0.FreezeEvent.AsObject, unfreeze?: GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEvent.AsObject, destroyFrozenFunds?: GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.AsObject, - transfer?: GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.AsObject, emergencyAction?: 
GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.AsObject, tokenConfigUpdate?: GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.AsObject, } @@ -8560,9 +8510,8 @@ export namespace GetGroupActionsResponse { FREEZE = 3, UNFREEZE = 4, DESTROY_FROZEN_FUNDS = 5, - TRANSFER = 6, - EMERGENCY_ACTION = 7, - TOKEN_CONFIG_UPDATE = 8, + EMERGENCY_ACTION = 6, + TOKEN_CONFIG_UPDATE = 7, } } diff --git a/packages/dapi-grpc/clients/platform/v0/web/platform_pb.js b/packages/dapi-grpc/clients/platform/v0/web/platform_pb.js index 36356ff116d..db423372350 100644 --- a/packages/dapi-grpc/clients/platform/v0/web/platform_pb.js +++ b/packages/dapi-grpc/clients/platform/v0/web/platform_pb.js @@ -178,7 +178,6 @@ goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGr goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.TypeCase', null, { proto }); -goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEvent', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.VersionCase', null, { proto }); goog.exportSymbol('proto.org.dash.platform.dapi.v0.GetGroupInfoRequest', null, { proto }); @@ -5963,27 +5962,6 @@ if (goog.DEBUG && !COMPILED) { */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.displayName = 'proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote'; } -/** - 
* Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.displayName = 'proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent'; -} /** * Generated by JsPbCodeGenerator. * @param {Array=} opt_data Optional initial data array, typically from a @@ -63042,340 +63020,6 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.toObject = function(opt_includeInstance) { - return proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject = function(includeInstance, msg) { - var f, obj = { - recipientId: msg.getRecipientId_asB64(), - publicNote: jspb.Message.getFieldWithDefault(msg, 2, ""), - sharedEncryptedNote: (f = msg.getSharedEncryptedNote()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.toObject(includeInstance, f), - personalEncryptedNote: (f = msg.getPersonalEncryptedNote()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.toObject(includeInstance, f), - amount: jspb.Message.getFieldWithDefault(msg, 5, 0) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. 
- * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent; - return proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {!Uint8Array} */ (reader.readBytes()); - msg.setRecipientId(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setPublicNote(value); - break; - case 3: - var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote; - reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.deserializeBinaryFromReader); - msg.setSharedEncryptedNote(value); - break; - case 4: - var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote; - 
reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.deserializeBinaryFromReader); - msg.setPersonalEncryptedNote(value); - break; - case 5: - var value = /** @type {number} */ (reader.readUint64()); - msg.setAmount(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getRecipientId_asU8(); - if (f.length > 0) { - writer.writeBytes( - 1, - f - ); - } - f = /** @type {string} */ (jspb.Message.getField(message, 2)); - if (f != null) { - writer.writeString( - 2, - f - ); - } - f = message.getSharedEncryptedNote(); - if (f != null) { - writer.writeMessage( - 3, - f, - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote.serializeBinaryToWriter - ); - } - f = message.getPersonalEncryptedNote(); - if (f != null) { - writer.writeMessage( - 4, - f, - 
proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote.serializeBinaryToWriter - ); - } - f = message.getAmount(); - if (f !== 0) { - writer.writeUint64( - 5, - f - ); - } -}; - - -/** - * optional bytes recipient_id = 1; - * @return {string} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getRecipientId = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * optional bytes recipient_id = 1; - * This is a type-conversion wrapper around `getRecipientId()` - * @return {string} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getRecipientId_asB64 = function() { - return /** @type {string} */ (jspb.Message.bytesAsB64( - this.getRecipientId())); -}; - - -/** - * optional bytes recipient_id = 1; - * Note that Uint8Array is not supported on all browsers. - * @see http://caniuse.com/Uint8Array - * This is a type-conversion wrapper around `getRecipientId()` - * @return {!Uint8Array} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getRecipientId_asU8 = function() { - return /** @type {!Uint8Array} */ (jspb.Message.bytesAsU8( - this.getRecipientId())); -}; - - -/** - * @param {!(string|Uint8Array)} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setRecipientId = function(value) { - return jspb.Message.setProto3BytesField(this, 1, value); -}; - - -/** - * optional string public_note = 2; - * @return {string} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getPublicNote = function() { - return /** @type {string} */ 
(jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setPublicNote = function(value) { - return jspb.Message.setField(this, 2, value); -}; - - -/** - * Clears the field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.clearPublicNote = function() { - return jspb.Message.setField(this, 2, undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.hasPublicNote = function() { - return jspb.Message.getField(this, 2) != null; -}; - - -/** - * optional SharedEncryptedNote shared_encrypted_note = 3; - * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getSharedEncryptedNote = function() { - return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote, 3)); -}; - - -/** - * @param {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.SharedEncryptedNote|undefined} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this -*/ 
-proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setSharedEncryptedNote = function(value) { - return jspb.Message.setWrapperField(this, 3, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.clearSharedEncryptedNote = function() { - return this.setSharedEncryptedNote(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.hasSharedEncryptedNote = function() { - return jspb.Message.getField(this, 3) != null; -}; - - -/** - * optional PersonalEncryptedNote personal_encrypted_note = 4; - * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getPersonalEncryptedNote = function() { - return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote, 4)); -}; - - -/** - * @param {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.PersonalEncryptedNote|undefined} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this -*/ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setPersonalEncryptedNote = function(value) { - return jspb.Message.setWrapperField(this, 4, value); -}; - - -/** - * Clears 
the message field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.clearPersonalEncryptedNote = function() { - return this.setPersonalEncryptedNote(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.hasPersonalEncryptedNote = function() { - return jspb.Message.getField(this, 4) != null; -}; - - -/** - * optional uint64 amount = 5; - * @return {number} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.getAmount = function() { - return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 5, 0)); -}; - - -/** - * @param {number} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.prototype.setAmount = function(value) { - return jspb.Message.setProto3IntField(this, 5, value); -}; - - - - - if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. 
@@ -64710,7 +64354,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @private {!Array>} * @const */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_ = [[1,2,3,4,5,6,7,8]]; +proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_ = [[1,2,3,4,5,6,7]]; /** * @enum {number} @@ -64722,9 +64366,8 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV FREEZE: 3, UNFREEZE: 4, DESTROY_FROZEN_FUNDS: 5, - TRANSFER: 6, - EMERGENCY_ACTION: 7, - TOKEN_CONFIG_UPDATE: 8 + EMERGENCY_ACTION: 6, + TOKEN_CONFIG_UPDATE: 7 }; /** @@ -64770,7 +64413,6 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV freeze: (f = msg.getFreeze()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.FreezeEvent.toObject(includeInstance, f), unfreeze: (f = msg.getUnfreeze()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.UnfreezeEvent.toObject(includeInstance, f), destroyFrozenFunds: (f = msg.getDestroyFrozenFunds()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.toObject(includeInstance, f), - transfer: (f = msg.getTransfer()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.toObject(includeInstance, f), emergencyAction: (f = msg.getEmergencyAction()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.toObject(includeInstance, f), tokenConfigUpdate: (f = msg.getTokenConfigUpdate()) && proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.toObject(includeInstance, f) }; @@ -64835,16 +64477,11 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV msg.setDestroyFrozenFunds(value); break; 
case 6: - var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent; - reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.deserializeBinaryFromReader); - msg.setTransfer(value); - break; - case 7: var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent; reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.deserializeBinaryFromReader); msg.setEmergencyAction(value); break; - case 8: + case 7: var value = new proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent; reader.readMessage(value,proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.deserializeBinaryFromReader); msg.setTokenConfigUpdate(value); @@ -64918,18 +64555,10 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.DestroyFrozenFundsEvent.serializeBinaryToWriter ); } - f = message.getTransfer(); - if (f != null) { - writer.writeMessage( - 6, - f, - proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent.serializeBinaryToWriter - ); - } f = message.getEmergencyAction(); if (f != null) { writer.writeMessage( - 7, + 6, f, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent.serializeBinaryToWriter ); @@ -64937,7 +64566,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV f = message.getTokenConfigUpdate(); if (f != null) { writer.writeMessage( - 8, + 7, f, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent.serializeBinaryToWriter ); @@ -65131,49 +64760,12 @@ 
proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV /** - * optional TransferEvent transfer = 6; - * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.getTransfer = function() { - return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent, 6)); -}; - - -/** - * @param {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TransferEvent|undefined} value - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this -*/ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.setTransfer = function(value) { - return jspb.Message.setOneofWrapperField(this, 6, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.clearTransfer = function() { - return this.setTransfer(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.hasTransfer = function() { - return jspb.Message.getField(this, 6) != null; -}; - - -/** - * optional EmergencyActionEvent emergency_action = 7; + * optional EmergencyActionEvent emergency_action = 6; * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.getEmergencyAction = function() { return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent, 7)); + jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.EmergencyActionEvent, 6)); }; @@ -65182,7 +64774,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.setEmergencyAction = function(value) { - return jspb.Message.setOneofWrapperField(this, 7, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); + return jspb.Message.setOneofWrapperField(this, 6, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); }; @@ -65200,17 +64792,17 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {boolean} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.hasEmergencyAction = function() { - return 
jspb.Message.getField(this, 7) != null; + return jspb.Message.getField(this, 6) != null; }; /** - * optional TokenConfigUpdateEvent token_config_update = 8; + * optional TokenConfigUpdateEvent token_config_update = 7; * @return {?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.getTokenConfigUpdate = function() { return /** @type{?proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent} */ ( - jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent, 8)); + jspb.Message.getWrapperField(this, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenConfigUpdateEvent, 7)); }; @@ -65219,7 +64811,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {!proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent} returns this */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.setTokenConfigUpdate = function(value) { - return jspb.Message.setOneofWrapperField(this, 8, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); + return jspb.Message.setOneofWrapperField(this, 7, proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.oneofGroups_[0], value); }; @@ -65237,7 +64829,7 @@ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV * @return {boolean} */ proto.org.dash.platform.dapi.v0.GetGroupActionsResponse.GetGroupActionsResponseV0.TokenEvent.prototype.hasTokenConfigUpdate = function() { - return jspb.Message.getField(this, 8) != null; + return jspb.Message.getField(this, 7) != null; }; diff --git 
a/packages/dapi-grpc/protos/platform/v0/platform.proto b/packages/dapi-grpc/protos/platform/v0/platform.proto index d35efdd131a..905ef6c1d23 100644 --- a/packages/dapi-grpc/protos/platform/v0/platform.proto +++ b/packages/dapi-grpc/protos/platform/v0/platform.proto @@ -1646,15 +1646,6 @@ message GetGroupActionsResponse { bytes encrypted_data = 3; // Encrypted data } - // Transfer event - message TransferEvent { - bytes recipient_id = 1; // Recipient identifier - optional string public_note = 2; // Public note - optional SharedEncryptedNote shared_encrypted_note = 3; // Shared encrypted note - optional PersonalEncryptedNote personal_encrypted_note = 4; // Personal encrypted note - uint64 amount = 5; // Amount transferred - } - // Emergency action event message EmergencyActionEvent { // Enum for emergency action types @@ -1710,9 +1701,8 @@ message GetGroupActionsResponse { FreezeEvent freeze = 3; // Freeze event details UnfreezeEvent unfreeze = 4; // Unfreeze event details DestroyFrozenFundsEvent destroy_frozen_funds = 5; // Destroy frozen funds - TransferEvent transfer = 6; // Transfer event details - EmergencyActionEvent emergency_action = 7; // Emergency action details - TokenConfigUpdateEvent token_config_update = 8; // Token configuration update details + EmergencyActionEvent emergency_action = 6; // Emergency action details + TokenConfigUpdateEvent token_config_update = 7; // Token configuration update details } } diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.spec.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.spec.ts index 7ec6019cb7b..dad46135ac3 100644 --- a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.spec.ts +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.spec.ts @@ -81,11 +81,6 @@ describe('Client - Platform - Contracts - .get()', () => { apps, dpp, client, initialize, logger, fetcher, }, dataContractFixture.getId()); 
expect(contract.toJSON()).to.deep.equal(dataContractFixture.toJSON()); - expect(contract.getMetadata().getBlockHeight()).to.equal(BigInt(10)); - expect(contract.getMetadata().getCoreChainLockedHeight()).to.equal(42); - expect(contract.getMetadata().getTimeMs()).to.equal(BigInt(metadataFixture.getTimeMs())); - expect(contract.getMetadata().getProtocolVersion()) - .to.equal(metadataFixture.getProtocolVersion()); expect(askedFromDapi).to.equal(1); }); @@ -95,11 +90,6 @@ describe('Client - Platform - Contracts - .get()', () => { apps, dpp, client, initialize, logger, fetcher, }, dataContractFixture.getId()); expect(contract.toJSON()).to.deep.equal(dataContractFixture.toJSON()); - expect(contract.getMetadata().getBlockHeight()).to.equal(BigInt(10)); - expect(contract.getMetadata().getCoreChainLockedHeight()).to.equal(42); - expect(contract.getMetadata().getTimeMs()).to.equal(BigInt(metadataFixture.getTimeMs())); - expect(contract.getMetadata().getProtocolVersion()) - .to.equal(metadataFixture.getProtocolVersion()); expect(askedFromDapi).to.equal(1); }); }); diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.ts index 6238b982181..1a08b3d78a0 100644 --- a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.ts +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/contracts/get.ts @@ -1,4 +1,4 @@ -import { Identifier, Metadata } from '@dashevo/wasm-dpp'; +import { Identifier } from '@dashevo/wasm-dpp'; import { GetDataContractResponse } from '@dashevo/dapi-client/lib/methods/platform/getDataContract/GetDataContractResponse'; import { Platform } from '../../Platform'; @@ -44,18 +44,6 @@ export async function get(this: Platform, identifier: ContractIdentifier): Promi const contract = await this.dpp.dataContract .createFromBuffer(dataContractResponse.getDataContract() as Uint8Array); - let metadata; - const responseMetadata = 
dataContractResponse.getMetadata(); - if (responseMetadata) { - metadata = new Metadata( - responseMetadata.getHeight(), - responseMetadata.getCoreChainLockedHeight(), - responseMetadata.getTimeMs(), - responseMetadata.getProtocolVersion(), - ); - } - contract.setMetadata(metadata); - // Store contract to the cache // eslint-disable-next-line for (const appName of this.client.getApps().getNames()) { diff --git a/packages/rs-dpp/Cargo.toml b/packages/rs-dpp/Cargo.toml index 09a509111da..b7641ad2dac 100644 --- a/packages/rs-dpp/Cargo.toml +++ b/packages/rs-dpp/Cargo.toml @@ -14,7 +14,6 @@ authors = [ [dependencies] anyhow = { version = "1.0.81" } async-trait = { version = "0.1.79" } -ordered-float = { version = "4.6.0", features = ["serde"]} base64 = "0.22.1" bs58 = "0.5" byteorder = { version = "1.4" } diff --git a/packages/rs-dpp/src/block/extended_epoch_info/mod.rs b/packages/rs-dpp/src/block/extended_epoch_info/mod.rs index 238fdd2a629..9fd9fcbff06 100644 --- a/packages/rs-dpp/src/block/extended_epoch_info/mod.rs +++ b/packages/rs-dpp/src/block/extended_epoch_info/mod.rs @@ -9,7 +9,7 @@ use derive_more::From; use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; use serde::{Deserialize, Serialize}; -/// Extended Block information +/// Extended Epoch information #[derive( Clone, Debug, diff --git a/packages/rs-dpp/src/block/finalized_epoch_info/getters.rs b/packages/rs-dpp/src/block/finalized_epoch_info/getters.rs new file mode 100644 index 00000000000..f48d3298b4c --- /dev/null +++ b/packages/rs-dpp/src/block/finalized_epoch_info/getters.rs @@ -0,0 +1,79 @@ +use crate::block::finalized_epoch_info::v0::getters::FinalizedEpochInfoGettersV0; +use crate::block::finalized_epoch_info::FinalizedEpochInfo; +use crate::fee::Credits; +use crate::prelude::{BlockHeight, BlockHeightInterval, CoreBlockHeight, TimestampMillis}; +use platform_value::Identifier; + +impl FinalizedEpochInfoGettersV0 for FinalizedEpochInfo { + fn first_block_time(&self) -> 
TimestampMillis { + match self { + FinalizedEpochInfo::V0(v0) => v0.first_block_time(), + } + } + + fn first_block_height(&self) -> BlockHeight { + match self { + FinalizedEpochInfo::V0(v0) => v0.first_block_height(), + } + } + + fn total_blocks_in_epoch(&self) -> BlockHeightInterval { + match self { + FinalizedEpochInfo::V0(v0) => v0.total_blocks_in_epoch(), + } + } + + fn first_core_block_height(&self) -> CoreBlockHeight { + match self { + FinalizedEpochInfo::V0(v0) => v0.first_core_block_height(), + } + } + + fn next_epoch_start_core_block_height(&self) -> CoreBlockHeight { + match self { + FinalizedEpochInfo::V0(v0) => v0.next_epoch_start_core_block_height(), + } + } + + fn total_processing_fees(&self) -> Credits { + match self { + FinalizedEpochInfo::V0(v0) => v0.total_processing_fees(), + } + } + + fn total_distributed_storage_fees(&self) -> Credits { + match self { + FinalizedEpochInfo::V0(v0) => v0.total_distributed_storage_fees(), + } + } + + fn total_created_storage_fees(&self) -> Credits { + match self { + FinalizedEpochInfo::V0(v0) => v0.total_created_storage_fees(), + } + } + + fn core_block_rewards(&self) -> Credits { + match self { + FinalizedEpochInfo::V0(v0) => v0.core_block_rewards(), + } + } + + fn block_proposers(&self) -> &Vec<(Identifier, u64)> { + match self { + FinalizedEpochInfo::V0(v0) => v0.block_proposers(), + } + } + + fn fee_multiplier_permille(&self) -> u64 { + match self { + FinalizedEpochInfo::V0(v0) => v0.fee_multiplier_permille(), + } + } + + fn protocol_version(&self) -> u32 { + match self { + FinalizedEpochInfo::V0(v0) => v0.protocol_version(), + } + } +} diff --git a/packages/rs-dpp/src/block/finalized_epoch_info/mod.rs b/packages/rs-dpp/src/block/finalized_epoch_info/mod.rs new file mode 100644 index 00000000000..d8f2854d927 --- /dev/null +++ b/packages/rs-dpp/src/block/finalized_epoch_info/mod.rs @@ -0,0 +1,27 @@ +mod getters; +pub mod v0; + +use crate::block::finalized_epoch_info::v0::FinalizedEpochInfoV0; +use 
crate::protocol_error::ProtocolError; +use bincode::{Decode, Encode}; +use derive_more::From; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use serde::{Deserialize, Serialize}; + +/// Finalized Epoch information +#[derive( + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + PlatformSerialize, + PlatformDeserialize, + From, +)] +#[platform_serialize(unversioned)] //versioned directly, no need to use platform_version +pub enum FinalizedEpochInfo { + V0(FinalizedEpochInfoV0), +} diff --git a/packages/rs-dpp/src/block/finalized_epoch_info/v0/getters.rs b/packages/rs-dpp/src/block/finalized_epoch_info/v0/getters.rs new file mode 100644 index 00000000000..62a575b1187 --- /dev/null +++ b/packages/rs-dpp/src/block/finalized_epoch_info/v0/getters.rs @@ -0,0 +1,93 @@ +use crate::block::finalized_epoch_info::v0::FinalizedEpochInfoV0; +use crate::fee::Credits; +use crate::prelude::{BlockHeight, BlockHeightInterval, CoreBlockHeight, TimestampMillis}; +use platform_value::Identifier; + +/// Trait for accessing fields of `FinalizedEpochInfoV0`. +pub trait FinalizedEpochInfoGettersV0 { + /// Returns the first block time. + fn first_block_time(&self) -> TimestampMillis; + + /// Returns the first block height. + fn first_block_height(&self) -> BlockHeight; + + /// Returns the total blocks in the epoch. + fn total_blocks_in_epoch(&self) -> BlockHeightInterval; + + /// Returns the first core block height. + fn first_core_block_height(&self) -> CoreBlockHeight; + + /// Returns the last core block height. + fn next_epoch_start_core_block_height(&self) -> CoreBlockHeight; + + /// Returns the total processing fees. + fn total_processing_fees(&self) -> Credits; + + /// Returns the total distributed storage fees. + fn total_distributed_storage_fees(&self) -> Credits; + + /// Returns the total created storage fees. 
+ fn total_created_storage_fees(&self) -> Credits; + + /// Total rewards given from core subsidy + fn core_block_rewards(&self) -> Credits; + + /// Returns a reference to the block proposers map. + fn block_proposers(&self) -> &Vec<(Identifier, u64)>; + + /// Returns the fee multiplier (permille). + fn fee_multiplier_permille(&self) -> u64; + + /// Returns the protocol version. + fn protocol_version(&self) -> u32; +} + +impl FinalizedEpochInfoGettersV0 for FinalizedEpochInfoV0 { + fn first_block_time(&self) -> TimestampMillis { + self.first_block_time + } + + fn first_block_height(&self) -> BlockHeight { + self.first_block_height + } + + fn total_blocks_in_epoch(&self) -> BlockHeightInterval { + self.total_blocks_in_epoch + } + + fn first_core_block_height(&self) -> CoreBlockHeight { + self.first_core_block_height + } + + fn next_epoch_start_core_block_height(&self) -> CoreBlockHeight { + self.next_epoch_start_core_block_height + } + + fn total_processing_fees(&self) -> Credits { + self.total_processing_fees + } + + fn total_distributed_storage_fees(&self) -> Credits { + self.total_distributed_storage_fees + } + + fn total_created_storage_fees(&self) -> Credits { + self.total_created_storage_fees + } + + fn core_block_rewards(&self) -> Credits { + self.core_block_rewards + } + + fn block_proposers(&self) -> &Vec<(Identifier, u64)> { + &self.block_proposers + } + + fn fee_multiplier_permille(&self) -> u64 { + self.fee_multiplier_permille + } + + fn protocol_version(&self) -> u32 { + self.protocol_version + } +} diff --git a/packages/rs-dpp/src/block/finalized_epoch_info/v0/mod.rs b/packages/rs-dpp/src/block/finalized_epoch_info/v0/mod.rs new file mode 100644 index 00000000000..71c8cd99a9e --- /dev/null +++ b/packages/rs-dpp/src/block/finalized_epoch_info/v0/mod.rs @@ -0,0 +1,36 @@ +pub mod getters; + +use crate::fee::Credits; +use crate::prelude::{BlockHeight, BlockHeightInterval, CoreBlockHeight, TimestampMillis}; +use bincode::{Decode, Encode}; +use 
platform_value::Identifier; +use serde::{Deserialize, Serialize}; + +/// Finalized Epoch information +#[derive(Clone, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)] +pub struct FinalizedEpochInfoV0 { + /// First block time + pub first_block_time: TimestampMillis, + /// First block height + pub first_block_height: BlockHeight, + /// Total blocks in epoch + pub total_blocks_in_epoch: BlockHeightInterval, + /// First core block height + pub first_core_block_height: CoreBlockHeight, + /// Last core block height + pub next_epoch_start_core_block_height: CoreBlockHeight, + /// Total processing fees + pub total_processing_fees: Credits, + /// Total distributed storage fees + pub total_distributed_storage_fees: Credits, + /// Total created storage fees + pub total_created_storage_fees: Credits, + /// Total rewards given from core subsidy + pub core_block_rewards: Credits, + /// Block proposers + pub block_proposers: Vec<(Identifier, u64)>, + /// Fee multiplier that you would divide by 1000 to get float value + pub fee_multiplier_permille: u64, + /// Protocol version + pub protocol_version: u32, +} diff --git a/packages/rs-dpp/src/block/mod.rs b/packages/rs-dpp/src/block/mod.rs index e07b2d2e311..a8957b13d88 100644 --- a/packages/rs-dpp/src/block/mod.rs +++ b/packages/rs-dpp/src/block/mod.rs @@ -2,3 +2,5 @@ pub mod block_info; pub mod epoch; pub mod extended_block_info; pub mod extended_epoch_info; +pub mod finalized_epoch_info; +pub mod pool_credits; diff --git a/packages/rs-dpp/src/block/pool_credits.rs b/packages/rs-dpp/src/block/pool_credits.rs new file mode 100644 index 00000000000..8326de4c11e --- /dev/null +++ b/packages/rs-dpp/src/block/pool_credits.rs @@ -0,0 +1,19 @@ +use crate::fee::Credits; +use std::fmt; + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct StorageAndProcessingPoolCredits { + pub storage_pool_credits: Credits, + pub processing_pool_credits: Credits, + pub total_credits: Credits, +} + +impl fmt::Display for 
StorageAndProcessingPoolCredits { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Storage: {}, Processing: {}, Total: {}", + self.storage_pool_credits, self.processing_pool_credits, self.total_credits + ) + } +} diff --git a/packages/rs-dpp/src/data_contract/accessors/mod.rs b/packages/rs-dpp/src/data_contract/accessors/mod.rs index 92fdb99bd94..edb686751d0 100644 --- a/packages/rs-dpp/src/data_contract/accessors/mod.rs +++ b/packages/rs-dpp/src/data_contract/accessors/mod.rs @@ -4,15 +4,16 @@ use crate::data_contract::document_type::{DocumentType, DocumentTypeRef}; use crate::data_contract::{ DocumentName, GroupContractPosition, TokenContractPosition, EMPTY_GROUPS, EMPTY_TOKENS, }; -use crate::metadata::Metadata; -use crate::prelude::DataContract; +use crate::prelude::{BlockHeight, DataContract}; use platform_value::Identifier; +use crate::block::epoch::EpochIndex; use crate::data_contract::accessors::v1::{DataContractV1Getters, DataContractV1Setters}; use crate::data_contract::associated_token::token_configuration::TokenConfiguration; use crate::data_contract::errors::DataContractError; use crate::data_contract::group::Group; +use crate::identity::TimestampMillis; use crate::tokens::errors::TokenError; use crate::ProtocolError; use std::collections::BTreeMap; @@ -115,20 +116,6 @@ impl DataContractV0Getters for DataContract { } } - fn metadata(&self) -> Option<&Metadata> { - match self { - DataContract::V0(v0) => v0.metadata(), - DataContract::V1(v1) => v1.metadata(), - } - } - - fn metadata_mut(&mut self) -> Option<&mut Metadata> { - match self { - DataContract::V0(v0) => v0.metadata_mut(), - DataContract::V1(v1) => v1.metadata_mut(), - } - } - fn config(&self) -> &DataContractConfig { match self { DataContract::V0(v0) => v0.config(), @@ -173,13 +160,6 @@ impl DataContractV0Setters for DataContract { } } - fn set_metadata(&mut self, metadata: Option) { - match self { - DataContract::V0(v0) => v0.set_metadata(metadata), - 
DataContract::V1(v1) => v1.set_metadata(metadata), - } - } - fn set_config(&mut self, config: DataContractConfig) { match self { DataContract::V0(v0) => v0.set_config(config), @@ -287,6 +267,54 @@ impl DataContractV1Getters for DataContract { DataContract::V1(v1) => v1.token_id(position), } } + + /// Returns the timestamp in milliseconds when the contract was created. + fn created_at(&self) -> Option { + match self { + DataContract::V0(_) => None, + DataContract::V1(v1) => v1.created_at, + } + } + + /// Returns the timestamp in milliseconds when the contract was last updated. + fn updated_at(&self) -> Option { + match self { + DataContract::V0(_) => None, + DataContract::V1(v1) => v1.updated_at, + } + } + + /// Returns the block height at which the contract was created. + fn created_at_block_height(&self) -> Option { + match self { + DataContract::V0(_) => None, + DataContract::V1(v1) => v1.created_at_block_height, + } + } + + /// Returns the block height at which the contract was last updated. + fn updated_at_block_height(&self) -> Option { + match self { + DataContract::V0(_) => None, + DataContract::V1(v1) => v1.updated_at_block_height, + } + } + + /// Returns the epoch at which the contract was created. + fn created_at_epoch(&self) -> Option { + match self { + DataContract::V0(_) => None, + DataContract::V1(v1) => v1.created_at_epoch, + } + } + + /// Returns the epoch at which the contract was last updated. + fn updated_at_epoch(&self) -> Option { + match self { + DataContract::V0(_) => None, + DataContract::V1(v1) => v1.updated_at_epoch, + } + } } impl DataContractV1Setters for DataContract { @@ -329,4 +357,46 @@ impl DataContractV1Setters for DataContract { } } } + + /// Sets the timestamp in milliseconds when the contract was created. + fn set_created_at(&mut self, created_at: Option) { + if let DataContract::V1(v1) = self { + v1.created_at = created_at; + } + } + + /// Sets the timestamp in milliseconds when the contract was last updated. 
+ fn set_updated_at(&mut self, updated_at: Option) { + if let DataContract::V1(v1) = self { + v1.updated_at = updated_at; + } + } + + /// Sets the block height at which the contract was created. + fn set_created_at_block_height(&mut self, block_height: Option) { + if let DataContract::V1(v1) = self { + v1.created_at_block_height = block_height; + } + } + + /// Sets the block height at which the contract was last updated. + fn set_updated_at_block_height(&mut self, block_height: Option) { + if let DataContract::V1(v1) = self { + v1.updated_at_block_height = block_height; + } + } + + /// Sets the epoch at which the contract was created. + fn set_created_at_epoch(&mut self, epoch: Option) { + if let DataContract::V1(v1) = self { + v1.created_at_epoch = epoch; + } + } + + /// Sets the epoch at which the contract was last updated. + fn set_updated_at_epoch(&mut self, epoch: Option) { + if let DataContract::V1(v1) = self { + v1.updated_at_epoch = epoch; + } + } } diff --git a/packages/rs-dpp/src/data_contract/accessors/v0/mod.rs b/packages/rs-dpp/src/data_contract/accessors/v0/mod.rs index e2e3e5fcd45..33260d80a32 100644 --- a/packages/rs-dpp/src/data_contract/accessors/v0/mod.rs +++ b/packages/rs-dpp/src/data_contract/accessors/v0/mod.rs @@ -2,7 +2,6 @@ use crate::data_contract::config::DataContractConfig; use crate::data_contract::document_type::{DocumentType, DocumentTypeRef}; use crate::data_contract::errors::DataContractError; use crate::data_contract::DocumentName; -use crate::metadata::Metadata; use platform_value::Identifier; use std::collections::BTreeMap; @@ -39,12 +38,6 @@ pub trait DataContractV0Getters { /// Returns a mapping of document names to their corresponding document types as mutable. fn document_types_mut(&mut self) -> &mut BTreeMap; - /// Returns optional metadata associated with the contract. - fn metadata(&self) -> Option<&Metadata>; - - /// Returns a mutable reference to the optional metadata associated with the contract. 
- fn metadata_mut(&mut self) -> Option<&mut Metadata>; - /// Returns the internal configuration for the contract. fn config(&self) -> &DataContractConfig; @@ -64,9 +57,6 @@ pub trait DataContractV0Setters { /// Sets the identifier of the contract owner. fn set_owner_id(&mut self, owner_id: Identifier); - /// Sets the optional metadata associated with the contract. - fn set_metadata(&mut self, metadata: Option); - /// Sets the internal configuration for the contract. fn set_config(&mut self, config: DataContractConfig); } diff --git a/packages/rs-dpp/src/data_contract/accessors/v1/mod.rs b/packages/rs-dpp/src/data_contract/accessors/v1/mod.rs index 0727a24a21d..8178ab7ec2c 100644 --- a/packages/rs-dpp/src/data_contract/accessors/v1/mod.rs +++ b/packages/rs-dpp/src/data_contract/accessors/v1/mod.rs @@ -1,21 +1,26 @@ +use crate::block::epoch::EpochIndex; use crate::data_contract::accessors::v0::{DataContractV0Getters, DataContractV0Setters}; use crate::data_contract::associated_token::token_configuration::TokenConfiguration; use crate::data_contract::group::Group; use crate::data_contract::{GroupContractPosition, TokenContractPosition}; +use crate::identity::TimestampMillis; +use crate::prelude::BlockHeight; use crate::ProtocolError; use platform_value::Identifier; use std::collections::BTreeMap; pub trait DataContractV1Getters: DataContractV0Getters { - /// Gets a group at a certain position + /// Gets a group at a certain position. fn group(&self, position: GroupContractPosition) -> Result<&Group, ProtocolError>; + /// Returns a reference to the groups map. fn groups(&self) -> &BTreeMap; /// Returns a mutable reference to the groups map. fn groups_mut(&mut self) -> Option<&mut BTreeMap>; + /// Returns a reference to a group or an error. - /// Returns an Error for V0 since it doesn't have groups. + /// Returns an error for V0 since it doesn't have groups. 
fn expected_group(&self, position: GroupContractPosition) -> Result<&Group, ProtocolError>; /// Returns a reference to the tokens map. @@ -25,21 +30,39 @@ pub trait DataContractV1Getters: DataContractV0Getters { fn tokens_mut(&mut self) -> Option<&mut BTreeMap>; /// Returns a mutable reference to a token configuration or an error. - /// Returns an Error for V0 since it doesn't have tokens. + /// Returns an error for V0 since it doesn't have tokens. fn expected_token_configuration( &self, position: TokenContractPosition, ) -> Result<&TokenConfiguration, ProtocolError>; - /// Returns a mutable reference to a token configuration + /// Returns a mutable reference to a token configuration. /// Returns `None` for V0 since it doesn't have tokens. fn token_configuration_mut( &mut self, position: TokenContractPosition, ) -> Option<&mut TokenConfiguration>; - /// Returns the token id at a certain position + /// Returns the token id at a certain position. fn token_id(&self, position: TokenContractPosition) -> Option; + + /// Returns the timestamp in milliseconds when the contract was created. + fn created_at(&self) -> Option; + + /// Returns the timestamp in milliseconds when the contract was last updated. + fn updated_at(&self) -> Option; + + /// Returns the block height at which the contract was created. + fn created_at_block_height(&self) -> Option; + + /// Returns the block height at which the contract was last updated. + fn updated_at_block_height(&self) -> Option; + + /// Returns the epoch at which the contract was created. + fn created_at_epoch(&self) -> Option; + + /// Returns the epoch at which the contract was last updated. + fn updated_at_epoch(&self) -> Option; } pub trait DataContractV1Setters: DataContractV0Setters { @@ -54,4 +77,22 @@ pub trait DataContractV1Setters: DataContractV0Setters { /// Adds or updates a single token configuration in the tokens map. 
fn add_token(&mut self, pos: TokenContractPosition, token: TokenConfiguration); + + /// Sets the timestamp in milliseconds when the contract was created. + fn set_created_at(&mut self, created_at: Option); + + /// Sets the timestamp in milliseconds when the contract was last updated. + fn set_updated_at(&mut self, updated_at: Option); + + /// Sets the block height at which the contract was created. + fn set_created_at_block_height(&mut self, block_height: Option); + + /// Sets the block height at which the contract was last updated. + fn set_updated_at_block_height(&mut self, block_height: Option); + + /// Sets the epoch at which the contract was created. + fn set_created_at_epoch(&mut self, epoch: Option); + + /// Sets the block height at which the contract was last updated. + fn set_updated_at_epoch(&mut self, epoch: Option); } diff --git a/packages/rs-dpp/src/data_contract/associated_token/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/mod.rs index 099f3e29b6f..9cb4d425114 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/mod.rs @@ -1,7 +1,9 @@ pub mod token_configuration; pub mod token_configuration_convention; pub mod token_configuration_item; +pub mod token_configuration_localization; pub mod token_distribution_key; pub mod token_distribution_rules; +pub mod token_keeps_history_rules; pub mod token_perpetual_distribution; pub mod token_pre_programmed_distribution; diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/mod.rs index 2f063196a3e..662ab19373f 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/mod.rs @@ -7,6 +7,7 @@ use crate::data_contract::associated_token::token_configuration::accessors::v0:: 
use crate::data_contract::associated_token::token_configuration::TokenConfiguration; use crate::data_contract::associated_token::token_configuration_convention::TokenConfigurationConvention; use crate::data_contract::associated_token::token_distribution_rules::TokenDistributionRules; +use crate::data_contract::associated_token::token_keeps_history_rules::TokenKeepsHistoryRules; use crate::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; use crate::data_contract::change_control_rules::ChangeControlRules; use crate::data_contract::GroupContractPosition; @@ -42,7 +43,7 @@ impl TokenConfigurationV0Getters for TokenConfiguration { } /// Returns if we keep history. - fn keeps_history(&self) -> bool { + fn keeps_history(&self) -> &TokenKeepsHistoryRules { match self { TokenConfiguration::V0(v0) => v0.keeps_history(), } diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/v0/mod.rs index 2ab5f7a6259..59fa3d34b6c 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/v0/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/accessors/v0/mod.rs @@ -1,6 +1,7 @@ use crate::balances::credits::TokenAmount; use crate::data_contract::associated_token::token_configuration_convention::TokenConfigurationConvention; use crate::data_contract::associated_token::token_distribution_rules::TokenDistributionRules; +use crate::data_contract::associated_token::token_keeps_history_rules::TokenKeepsHistoryRules; use crate::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; use crate::data_contract::change_control_rules::ChangeControlRules; use crate::data_contract::GroupContractPosition; @@ -19,7 +20,7 @@ pub trait TokenConfigurationV0Getters { /// Returns the base supply. 
fn base_supply(&self) -> TokenAmount; /// Returns the base supply. - fn keeps_history(&self) -> bool; + fn keeps_history(&self) -> &TokenKeepsHistoryRules; fn start_as_paused(&self) -> bool; /// Returns the maximum supply. diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/accessors.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/accessors.rs index 7a289badbde..2e7c1353ad4 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/accessors.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/accessors.rs @@ -7,6 +7,7 @@ use crate::data_contract::associated_token::token_configuration::v0::{ }; use crate::data_contract::associated_token::token_distribution_rules::accessors::v0::TokenDistributionRulesV0Getters; use crate::data_contract::associated_token::token_distribution_rules::TokenDistributionRules; +use crate::data_contract::associated_token::token_keeps_history_rules::TokenKeepsHistoryRules; use crate::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; use crate::data_contract::change_control_rules::ChangeControlRules; use crate::data_contract::GroupContractPosition; @@ -35,8 +36,8 @@ impl TokenConfigurationV0Getters for TokenConfigurationV0 { } /// Returns if we keep history. 
- fn keeps_history(&self) -> bool { - self.keeps_history + fn keeps_history(&self) -> &TokenKeepsHistoryRules { + &self.keeps_history } /// Returns if we start off as paused diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/mod.rs index 0c8ccb6dc3e..c46e89f8ca8 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration/v0/mod.rs @@ -5,6 +5,8 @@ use crate::data_contract::associated_token::token_configuration_convention::v0:: use crate::data_contract::associated_token::token_configuration_convention::TokenConfigurationConvention; use crate::data_contract::associated_token::token_distribution_rules::v0::TokenDistributionRulesV0; use crate::data_contract::associated_token::token_distribution_rules::TokenDistributionRules; +use crate::data_contract::associated_token::token_keeps_history_rules::v0::TokenKeepsHistoryRulesV0; +use crate::data_contract::associated_token::token_keeps_history_rules::TokenKeepsHistoryRules; use crate::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; use crate::data_contract::change_control_rules::v0::ChangeControlRulesV0; use crate::data_contract::change_control_rules::ChangeControlRules; @@ -25,9 +27,9 @@ pub struct TokenConfigurationV0 { /// The maximum supply the token can ever have #[serde(default)] pub max_supply: Option, - /// Do we keep history, default is true. - #[serde(default = "default_keeps_history")] - pub keeps_history: bool, + /// The rules for keeping history. + #[serde(default = "default_token_keeps_history_rules")] + pub keeps_history: TokenKeepsHistoryRules, /// Do we start off as paused, meaning that we can not transfer till we unpause. 
#[serde(default = "default_starts_as_paused")] pub start_as_paused: bool, @@ -66,6 +68,15 @@ fn default_starts_as_paused() -> bool { false } +fn default_token_keeps_history_rules() -> TokenKeepsHistoryRules { + TokenKeepsHistoryRules::V0(TokenKeepsHistoryRulesV0 { + keeps_transfer_history: true, + keeps_freezing_history: true, + keeps_minting_history: true, + keeps_burning_history: true, + }) +} + fn default_token_distribution_rules() -> TokenDistributionRules { TokenDistributionRules::V0(TokenDistributionRulesV0 { perpetual_distribution: None, @@ -158,7 +169,12 @@ impl TokenConfigurationV0 { .into(), base_supply: 100000, max_supply: None, - keeps_history: true, + keeps_history: TokenKeepsHistoryRules::V0(TokenKeepsHistoryRulesV0 { + keeps_transfer_history: true, + keeps_freezing_history: true, + keeps_minting_history: true, + keeps_burning_history: true, + }), start_as_paused: false, max_supply_change_rules: ChangeControlRulesV0 { authorized_to_make_change: AuthorizedActionTakers::NoOne, diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/accessors/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/accessors/mod.rs new file mode 100644 index 00000000000..e084dffc38f --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/accessors/mod.rs @@ -0,0 +1 @@ +mod v0; diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/accessors/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/accessors/v0/mod.rs new file mode 100644 index 00000000000..1554382cc11 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/accessors/v0/mod.rs @@ -0,0 +1,26 @@ +use crate::data_contract::associated_token::token_configuration_localization::TokenConfigurationLocalization; +use std::collections::BTreeMap; + +/// Accessor trait for getters 
of `TokenConfigurationConventionV0` +pub trait TokenConfigurationConventionV0Getters { + /// Returns a reference to the localizations. + fn localizations(&self) -> &BTreeMap; + + /// Returns a mutable reference to the localizations. + fn localizations_mut(&mut self) -> &mut BTreeMap; + + /// Returns the decimals value. + fn decimals(&self) -> u16; +} + +/// Accessor trait for setters of `TokenConfigurationConventionV0` +pub trait TokenConfigurationConventionV0Setters { + /// Sets the localizations. + fn set_localizations( + &mut self, + localizations: BTreeMap, + ); + + /// Sets the decimals value. + fn set_decimals(&mut self, decimals: u16); +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/mod.rs index 76a4ed1370c..f4b332a6752 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/mod.rs @@ -2,7 +2,9 @@ use crate::data_contract::associated_token::token_configuration_convention::v0:: use bincode::{Decode, Encode}; use derive_more::From; use serde::{Deserialize, Serialize}; +use std::fmt; +mod accessors; pub mod v0; #[derive(Serialize, Deserialize, Encode, Decode, Debug, Clone, PartialEq, Eq, PartialOrd, From)] @@ -12,8 +14,6 @@ pub enum TokenConfigurationConvention { V0(TokenConfigurationConventionV0), } -use std::fmt; - impl fmt::Display for TokenConfigurationConvention { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/v0/mod.rs index 6ea538cc6e5..57875837132 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/v0/mod.rs +++ 
b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_convention/v0/mod.rs @@ -1,34 +1,17 @@ +use crate::data_contract::associated_token::token_configuration_localization::TokenConfigurationLocalization; use bincode::Encode; use platform_serialization::de::Decode; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::fmt; -#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq, PartialOrd)] -#[serde(rename_all = "camelCase")] -pub struct TokenConfigurationLocalizationsV0 { - pub should_capitalize: bool, - pub singular_form: String, - pub plural_form: String, -} - -impl fmt::Display for TokenConfigurationLocalizationsV0 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Capitalized: {}, Singular: '{}', Plural: '{}'", - self.should_capitalize, self.singular_form, self.plural_form - ) - } -} - #[derive( Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq, PartialOrd, Default, )] #[serde(rename_all = "camelCase")] pub struct TokenConfigurationConventionV0 { #[serde(default)] - pub localizations: BTreeMap, + pub localizations: BTreeMap, #[serde(default = "default_decimals")] pub decimals: u16, } diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_item.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_item.rs index e91788708e9..87d519596a0 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_item.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_item.rs @@ -8,6 +8,7 @@ use bincode::Encode; use platform_serialization::de::Decode; use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; use platform_value::Identifier; +#[cfg(feature = "state-transition-serde-conversion")] use serde::{Deserialize, Serialize}; use std::fmt; diff --git 
a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/accessors/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/accessors/mod.rs new file mode 100644 index 00000000000..354e7a82984 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/accessors/mod.rs @@ -0,0 +1,46 @@ +use crate::data_contract::associated_token::token_configuration_localization::accessors::v0::{ + TokenConfigurationLocalizationV0Getters, TokenConfigurationLocalizationV0Setters, +}; +use crate::data_contract::associated_token::token_configuration_localization::TokenConfigurationLocalization; + +pub mod v0; + +impl TokenConfigurationLocalizationV0Getters for TokenConfigurationLocalization { + fn should_capitalize(&self) -> bool { + match self { + TokenConfigurationLocalization::V0(v0) => v0.should_capitalize, + } + } + + fn singular_form(&self) -> &str { + match self { + TokenConfigurationLocalization::V0(v0) => v0.singular_form.as_str(), + } + } + + fn plural_form(&self) -> &str { + match self { + TokenConfigurationLocalization::V0(v0) => v0.plural_form.as_str(), + } + } +} + +impl TokenConfigurationLocalizationV0Setters for TokenConfigurationLocalization { + fn set_should_capitalize(&mut self, should_capitalize: bool) { + match self { + TokenConfigurationLocalization::V0(v0) => v0.should_capitalize = should_capitalize, + } + } + + fn set_singular_form(&mut self, singular_form: String) { + match self { + TokenConfigurationLocalization::V0(v0) => v0.singular_form = singular_form, + } + } + + fn set_plural_form(&mut self, plural_form: String) { + match self { + TokenConfigurationLocalization::V0(v0) => v0.plural_form = plural_form, + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/accessors/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/accessors/v0/mod.rs new file mode 
100644 index 00000000000..cd9b1428d56 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/accessors/v0/mod.rs @@ -0,0 +1,23 @@ +/// Accessor trait for getters of `TokenConfigurationLocalizationV0` +pub trait TokenConfigurationLocalizationV0Getters { + /// Returns whether the token name should be capitalized. + fn should_capitalize(&self) -> bool; + + /// Returns a reference to the singular form of the token name. + fn singular_form(&self) -> &str; + + /// Returns a reference to the plural form of the token name. + fn plural_form(&self) -> &str; +} + +/// Accessor trait for setters of `TokenConfigurationLocalizationV0` +pub trait TokenConfigurationLocalizationV0Setters { + /// Sets whether the token name should be capitalized. + fn set_should_capitalize(&mut self, should_capitalize: bool); + + /// Sets the singular form of the token name. + fn set_singular_form(&mut self, singular_form: String); + + /// Sets the plural form of the token name. 
+ fn set_plural_form(&mut self, plural_form: String); +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/mod.rs new file mode 100644 index 00000000000..424d64b0115 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/mod.rs @@ -0,0 +1,26 @@ +use crate::data_contract::associated_token::token_configuration_localization::v0::TokenConfigurationLocalizationV0; +use bincode::Encode; +use derive_more::From; +use platform_serialization::de::Decode; +use serde::{Deserialize, Serialize}; +use std::fmt; + +pub mod accessors; +pub mod v0; + +#[derive(Serialize, Deserialize, Encode, Decode, Debug, Clone, PartialEq, Eq, PartialOrd, From)] +#[serde(tag = "$format_version")] +pub enum TokenConfigurationLocalization { + #[serde(rename = "0")] + V0(TokenConfigurationLocalizationV0), +} + +impl fmt::Display for TokenConfigurationLocalization { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TokenConfigurationLocalization::V0(v0) => { + write!(f, "{}", v0) //just pass through + } + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/v0/accessors.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/v0/accessors.rs new file mode 100644 index 00000000000..fb9ecc425a4 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/v0/accessors.rs @@ -0,0 +1,32 @@ +use crate::data_contract::associated_token::token_configuration_localization::accessors::v0::{ + TokenConfigurationLocalizationV0Getters, TokenConfigurationLocalizationV0Setters, +}; +use crate::data_contract::associated_token::token_configuration_localization::v0::TokenConfigurationLocalizationV0; + +impl TokenConfigurationLocalizationV0Getters for TokenConfigurationLocalizationV0 
{ + fn should_capitalize(&self) -> bool { + self.should_capitalize + } + + fn singular_form(&self) -> &str { + &self.singular_form + } + + fn plural_form(&self) -> &str { + &self.plural_form + } +} + +impl TokenConfigurationLocalizationV0Setters for TokenConfigurationLocalizationV0 { + fn set_should_capitalize(&mut self, should_capitalize: bool) { + self.should_capitalize = should_capitalize; + } + + fn set_singular_form(&mut self, singular_form: String) { + self.singular_form = singular_form; + } + + fn set_plural_form(&mut self, plural_form: String) { + self.plural_form = plural_form; + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/v0/mod.rs new file mode 100644 index 00000000000..86521c1bc73 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_configuration_localization/v0/mod.rs @@ -0,0 +1,23 @@ +mod accessors; + +use bincode::{Decode, Encode}; +use serde::{Deserialize, Serialize}; +use std::fmt; + +#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq, PartialOrd)] +#[serde(rename_all = "camelCase")] +pub struct TokenConfigurationLocalizationV0 { + pub should_capitalize: bool, + pub singular_form: String, + pub plural_form: String, +} + +impl fmt::Display for TokenConfigurationLocalizationV0 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Capitalized: {}, Singular: '{}', Plural: '{}'", + self.should_capitalize, self.singular_form, self.plural_form + ) + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_distribution_key.rs b/packages/rs-dpp/src/data_contract/associated_token/token_distribution_key.rs index e3ae314754b..8953531cf6a 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_distribution_key.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_distribution_key.rs @@ -1,14 
+1,95 @@ -use crate::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::{TokenDistributionRecipient, TokenDistributionResolvedRecipient}; use crate::errors::ProtocolError; use bincode::{Decode, Encode}; use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; use platform_value::Identifier; use serde::{Deserialize, Serialize}; +use std::fmt; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::prelude::TimestampMillis; -#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq)] -pub enum DistributionType { - PreProgrammed, - Perpetual, +/// Represents the type of token distribution. +/// +/// - `PreProgrammed`: A scheduled distribution with predefined rules. +/// - `Perpetual`: A continuous or recurring distribution. +#[derive( + Serialize, Deserialize, Decode, Encode, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Default, +)] +pub enum TokenDistributionType { + /// A pre-programmed distribution scheduled for a specific time. + #[default] + PreProgrammed = 0, + + /// A perpetual distribution that occurs at regular intervals. + Perpetual = 1, +} + +/// Represents a token distribution with a resolved recipient. +/// +/// - `PreProgrammed(Identifier)`: A predefined recipient for a scheduled distribution. +/// - `Perpetual(TokenDistributionResolvedRecipient)`: A resolved recipient for an ongoing distribution. +#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq, PartialOrd)] +pub enum TokenDistributionTypeWithResolvedRecipient { + /// A scheduled distribution with a known recipient. + PreProgrammed(Identifier), + + /// A perpetual distribution with a resolved recipient. 
+ Perpetual(TokenDistributionResolvedRecipient), +} + +/// Contains information about a specific token distribution instance. +/// +/// - `PreProgrammed(TimestampMillis, Identifier)`: A scheduled distribution with a timestamp and recipient. +/// - `Perpetual(RewardDistributionMoment, RewardDistributionMoment, TokenDistributionResolvedRecipient)`: +/// A perpetual distribution with previous and next distribution moments, along with the resolved recipient. +#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq, PartialOrd)] +pub enum TokenDistributionInfo { + /// A pre-programmed token distribution set for a specific time. + /// Contains the scheduled timestamp and the recipient’s identifier. + PreProgrammed(TimestampMillis, Identifier), + + /// A perpetual token distribution with previous and next moments. + /// Includes the last and next distribution times and the resolved recipient. + Perpetual( + RewardDistributionMoment, + RewardDistributionMoment, + TokenDistributionResolvedRecipient, + ), +} + +impl From for TokenDistributionTypeWithResolvedRecipient { + fn from(info: TokenDistributionInfo) -> Self { + match info { + TokenDistributionInfo::PreProgrammed(_, recipient) => { + TokenDistributionTypeWithResolvedRecipient::PreProgrammed(recipient) + } + TokenDistributionInfo::Perpetual(_, _, recipient) => { + TokenDistributionTypeWithResolvedRecipient::Perpetual(recipient) + } + } + } +} + +impl From<&TokenDistributionInfo> for TokenDistributionTypeWithResolvedRecipient { + fn from(info: &TokenDistributionInfo) -> Self { + match info { + TokenDistributionInfo::PreProgrammed(_, recipient) => { + TokenDistributionTypeWithResolvedRecipient::PreProgrammed(*recipient) + } + TokenDistributionInfo::Perpetual(_, _, recipient) => { + TokenDistributionTypeWithResolvedRecipient::Perpetual(recipient.clone()) + } + } + } +} + +impl fmt::Display for TokenDistributionType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + 
TokenDistributionType::PreProgrammed => write!(f, "PreProgrammed"), + TokenDistributionType::Perpetual => write!(f, "Perpetual"), + } + } } #[derive( @@ -27,5 +108,5 @@ pub enum DistributionType { pub struct TokenDistributionKey { pub token_id: Identifier, pub recipient: TokenDistributionRecipient, - pub distribution_type: DistributionType, + pub distribution_type: TokenDistributionType, } diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/accessors/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/accessors/mod.rs new file mode 100644 index 00000000000..928c105b9ff --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/accessors/mod.rs @@ -0,0 +1,60 @@ +use crate::data_contract::associated_token::token_keeps_history_rules::accessors::v0::{ + TokenKeepsHistoryRulesV0Getters, TokenKeepsHistoryRulesV0Setters, +}; +use crate::data_contract::associated_token::token_keeps_history_rules::TokenKeepsHistoryRules; + +pub mod v0; + +/// Implementing `TokenKeepsHistoryRulesV0Getters` for `TokenKeepsHistoryRules` +impl TokenKeepsHistoryRulesV0Getters for TokenKeepsHistoryRules { + fn keeps_transfer_history(&self) -> bool { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_transfer_history, + } + } + + fn keeps_freezing_history(&self) -> bool { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_freezing_history, + } + } + + fn keeps_minting_history(&self) -> bool { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_minting_history, + } + } + + fn keeps_burning_history(&self) -> bool { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_burning_history, + } + } +} + +/// Implementing `TokenKeepsHistoryRulesV0Setters` for `TokenKeepsHistoryRules` +impl TokenKeepsHistoryRulesV0Setters for TokenKeepsHistoryRules { + fn set_keeps_transfer_history(&mut self, value: bool) { + match self { + TokenKeepsHistoryRules::V0(v0) => 
v0.keeps_transfer_history = value, + } + } + + fn set_keeps_freezing_history(&mut self, value: bool) { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_freezing_history = value, + } + } + + fn set_keeps_minting_history(&mut self, value: bool) { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_minting_history = value, + } + } + + fn set_keeps_burning_history(&mut self, value: bool) { + match self { + TokenKeepsHistoryRules::V0(v0) => v0.keeps_burning_history = value, + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/accessors/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/accessors/v0/mod.rs new file mode 100644 index 00000000000..791a5417002 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/accessors/v0/mod.rs @@ -0,0 +1,29 @@ +/// Accessor trait for getters of `TokenKeepsHistoryRulesV0` +pub trait TokenKeepsHistoryRulesV0Getters { + /// Returns whether transfer history is kept. + fn keeps_transfer_history(&self) -> bool; + + /// Returns whether freezing history is kept. + fn keeps_freezing_history(&self) -> bool; + + /// Returns whether minting history is kept. + fn keeps_minting_history(&self) -> bool; + + /// Returns whether burning history is kept. + fn keeps_burning_history(&self) -> bool; +} + +/// Accessor trait for setters of `TokenKeepsHistoryRulesV0` +pub trait TokenKeepsHistoryRulesV0Setters { + /// Sets whether transfer history is kept. + fn set_keeps_transfer_history(&mut self, value: bool); + + /// Sets whether freezing history is kept. + fn set_keeps_freezing_history(&mut self, value: bool); + + /// Sets whether minting history is kept. + fn set_keeps_minting_history(&mut self, value: bool); + + /// Sets whether burning history is kept. 
+ fn set_keeps_burning_history(&mut self, value: bool); +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/mod.rs new file mode 100644 index 00000000000..b96dad1f7ff --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/mod.rs @@ -0,0 +1,26 @@ +use bincode::{Decode, Encode}; +use derive_more::From; +use serde::{Deserialize, Serialize}; + +pub mod accessors; +pub mod v0; + +#[derive(Serialize, Deserialize, Encode, Decode, Debug, Clone, Copy, PartialEq, Eq, From)] +#[serde(tag = "$format_version")] +pub enum TokenKeepsHistoryRules { + #[serde(rename = "0")] + V0(TokenKeepsHistoryRulesV0), +} + +use crate::data_contract::associated_token::token_keeps_history_rules::v0::TokenKeepsHistoryRulesV0; +use std::fmt; + +impl fmt::Display for TokenKeepsHistoryRules { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TokenKeepsHistoryRules::V0(v0) => { + write!(f, "{}", v0) //just pass through + } + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/v0/accessors.rs b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/v0/accessors.rs new file mode 100644 index 00000000000..a837078a3d1 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/v0/accessors.rs @@ -0,0 +1,42 @@ +use crate::data_contract::associated_token::token_keeps_history_rules::accessors::v0::{ + TokenKeepsHistoryRulesV0Getters, TokenKeepsHistoryRulesV0Setters, +}; +use crate::data_contract::associated_token::token_keeps_history_rules::v0::TokenKeepsHistoryRulesV0; + +/// Implementing `TokenKeepsHistoryRulesV0Getters` for `TokenKeepsHistoryRulesV0` +impl TokenKeepsHistoryRulesV0Getters for TokenKeepsHistoryRulesV0 { + fn keeps_transfer_history(&self) -> bool { + self.keeps_transfer_history + } + + fn 
keeps_freezing_history(&self) -> bool { + self.keeps_freezing_history + } + + fn keeps_minting_history(&self) -> bool { + self.keeps_minting_history + } + + fn keeps_burning_history(&self) -> bool { + self.keeps_burning_history + } +} + +/// Implementing `TokenKeepsHistoryRulesV0Setters` for `TokenKeepsHistoryRulesV0` +impl TokenKeepsHistoryRulesV0Setters for TokenKeepsHistoryRulesV0 { + fn set_keeps_transfer_history(&mut self, value: bool) { + self.keeps_transfer_history = value; + } + + fn set_keeps_freezing_history(&mut self, value: bool) { + self.keeps_freezing_history = value; + } + + fn set_keeps_minting_history(&mut self, value: bool) { + self.keeps_minting_history = value; + } + + fn set_keeps_burning_history(&mut self, value: bool) { + self.keeps_burning_history = value; + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/v0/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/v0/mod.rs new file mode 100644 index 00000000000..fd2a287f792 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_keeps_history_rules/v0/mod.rs @@ -0,0 +1,57 @@ +mod accessors; +use bincode::Encode; +use platform_serialization::de::Decode; +use serde::{Deserialize, Serialize}; +use std::fmt; + +/// The rules for keeping a ledger as documents of token events. +/// Config update, Destroying Frozen Funds, Emergency Action, +/// Pre Programmed Token Release always require an entry to the ledger +#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, Copy, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct TokenKeepsHistoryRulesV0 { + /// Whether transfer history is recorded. + #[serde(default = "default_true")] + pub keeps_transfer_history: bool, + + /// Whether freezing history is recorded. + #[serde(default = "default_true")] + pub keeps_freezing_history: bool, + + /// Whether minting history is recorded. 
+ #[serde(default = "default_true")] + pub keeps_minting_history: bool, + + /// Whether burning history is recorded. + #[serde(default = "default_true")] + pub keeps_burning_history: bool, +} + +impl Default for TokenKeepsHistoryRulesV0 { + fn default() -> Self { + TokenKeepsHistoryRulesV0 { + keeps_transfer_history: true, + keeps_freezing_history: true, + keeps_minting_history: true, + keeps_burning_history: true, + } + } +} + +/// Provides a default value of `true` for boolean fields. +fn default_true() -> bool { + true +} + +impl fmt::Display for TokenKeepsHistoryRulesV0 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "TokenKeepsHistoryRulesV0 {{\n keeps_transfer_history: {},\n keeps_freezing_history: {},\n keeps_minting_history: {},\n keeps_burning_history: {}\n}}", + self.keeps_transfer_history, + self.keeps_freezing_history, + self.keeps_minting_history, + self.keeps_burning_history + ) + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/encode.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/encode.rs index 2d693dd8ae8..2d709bc5ed0 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/encode.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/encode.rs @@ -1,81 +1,152 @@ -use crate::balances::credits::{SignedTokenAmount, TokenAmount}; +use crate::balances::credits::TokenAmount; use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; use bincode::{BorrowDecode, Decode, Encode}; -use ordered_float::NotNan; +use std::collections::BTreeMap; -/// Helper function to decode a `NotNan` safely. -fn decode_not_nan( - decoder: &mut D, -) -> Result, bincode::error::DecodeError> { - NotNan::new(f64::decode(decoder)?) 
- .map_err(|_| bincode::error::DecodeError::OtherString("Invalid float: NaN".into())) -} - -// Implement Encode for DistributionFunction impl Encode for DistributionFunction { fn encode( &self, encoder: &mut E, ) -> Result<(), bincode::error::EncodeError> { match self { - DistributionFunction::FixedAmount { n } => { + DistributionFunction::FixedAmount { amount: n } => { 0u8.encode(encoder)?; n.encode(encoder)?; } + DistributionFunction::Random { min, max } => { + 1u8.encode(encoder)?; + min.encode(encoder)?; + max.encode(encoder)?; + } DistributionFunction::StepDecreasingAmount { step_count, - decrease_per_interval, + decrease_per_interval_numerator, + decrease_per_interval_denominator, + s, n, + min_value, } => { - 1u8.encode(encoder)?; + 2u8.encode(encoder)?; step_count.encode(encoder)?; - decrease_per_interval.into_inner().encode(encoder)?; + decrease_per_interval_numerator.encode(encoder)?; + decrease_per_interval_denominator.encode(encoder)?; + s.encode(encoder)?; n.encode(encoder)?; + min_value.encode(encoder)?; } - DistributionFunction::LinearInteger { a, b } => { - 2u8.encode(encoder)?; - a.encode(encoder)?; - b.encode(encoder)?; - } - DistributionFunction::LinearFloat { a, b } => { + DistributionFunction::Stepwise(steps) => { 3u8.encode(encoder)?; - a.into_inner().encode(encoder)?; - b.encode(encoder)?; + steps.encode(encoder)?; } - DistributionFunction::PolynomialInteger { a, n, b } => { + DistributionFunction::Linear { + a, + d, + s, + b, + min_value, + max_value, + } => { 4u8.encode(encoder)?; a.encode(encoder)?; - n.encode(encoder)?; + d.encode(encoder)?; + s.encode(encoder)?; b.encode(encoder)?; + min_value.encode(encoder)?; + max_value.encode(encoder)?; } - DistributionFunction::PolynomialFloat { a, n, b } => { + DistributionFunction::Polynomial { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { 5u8.encode(encoder)?; - a.into_inner().encode(encoder)?; - n.into_inner().encode(encoder)?; + a.encode(encoder)?; + d.encode(encoder)?; 
+ m.encode(encoder)?; + n.encode(encoder)?; + o.encode(encoder)?; + s.encode(encoder)?; b.encode(encoder)?; + min_value.encode(encoder)?; + max_value.encode(encoder)?; } - DistributionFunction::Exponential { a, b, c } => { + DistributionFunction::Exponential { + a, + d, + m, + n, + o, + s, + c, + min_value, + max_value, + } => { 6u8.encode(encoder)?; - a.into_inner().encode(encoder)?; - b.into_inner().encode(encoder)?; + a.encode(encoder)?; + d.encode(encoder)?; + m.encode(encoder)?; + n.encode(encoder)?; + o.encode(encoder)?; + s.encode(encoder)?; c.encode(encoder)?; + min_value.encode(encoder)?; + max_value.encode(encoder)?; } - DistributionFunction::Logarithmic { a, b, c } => { + DistributionFunction::Logarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { 7u8.encode(encoder)?; - a.into_inner().encode(encoder)?; - b.into_inner().encode(encoder)?; - c.encode(encoder)?; + a.encode(encoder)?; + d.encode(encoder)?; + m.encode(encoder)?; + n.encode(encoder)?; + o.encode(encoder)?; + s.encode(encoder)?; + b.encode(encoder)?; + min_value.encode(encoder)?; + max_value.encode(encoder)?; } - DistributionFunction::Stepwise(steps) => { + DistributionFunction::InvertedLogarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { 8u8.encode(encoder)?; - steps.encode(encoder)?; + a.encode(encoder)?; + d.encode(encoder)?; + m.encode(encoder)?; + n.encode(encoder)?; + o.encode(encoder)?; + s.encode(encoder)?; + b.encode(encoder)?; + min_value.encode(encoder)?; + max_value.encode(encoder)?; } } Ok(()) } } -// Implement Decode for DistributionFunction impl Decode for DistributionFunction { fn decode( decoder: &mut D, @@ -84,55 +155,136 @@ impl Decode for DistributionFunction { match variant { 0 => { let n = TokenAmount::decode(decoder)?; - Ok(Self::FixedAmount { n }) + Ok(Self::FixedAmount { amount: n }) } 1 => { - let step_count = u64::decode(decoder)?; - let decrease_per_interval = decode_not_nan(decoder)?; + let min = 
TokenAmount::decode(decoder)?; + let max = TokenAmount::decode(decoder)?; + Ok(Self::Random { min, max }) + } + 2 => { + let step_count = u32::decode(decoder)?; + let decrease_per_interval_numerator = u16::decode(decoder)?; + let decrease_per_interval_denominator = u16::decode(decoder)?; + let s = Option::::decode(decoder)?; let n = TokenAmount::decode(decoder)?; + let min_value = Option::::decode(decoder)?; Ok(Self::StepDecreasingAmount { + s, + decrease_per_interval_numerator, + decrease_per_interval_denominator, step_count, - decrease_per_interval, n, + min_value, }) } - 2 => { - let a = i64::decode(decoder)?; - let b = SignedTokenAmount::decode(decoder)?; - Ok(Self::LinearInteger { a, b }) - } 3 => { - let a = decode_not_nan(decoder)?; - let b = SignedTokenAmount::decode(decoder)?; - Ok(Self::LinearFloat { a, b }) + let steps = BTreeMap::::decode(decoder)?; + Ok(Self::Stepwise(steps)) } 4 => { let a = i64::decode(decoder)?; - let n = i64::decode(decoder)?; - let b = SignedTokenAmount::decode(decoder)?; - Ok(Self::PolynomialInteger { a, n, b }) + let d = u64::decode(decoder)?; + let s = Option::::decode(decoder)?; + let b = TokenAmount::decode(decoder)?; + let min_value = Option::::decode(decoder)?; + let max_value = Option::::decode(decoder)?; + Ok(Self::Linear { + a, + d, + s, + b, + min_value, + max_value, + }) } 5 => { - let a = decode_not_nan(decoder)?; - let n = decode_not_nan(decoder)?; - let b = SignedTokenAmount::decode(decoder)?; - Ok(Self::PolynomialFloat { a, n, b }) + let a = i64::decode(decoder)?; + let d = u64::decode(decoder)?; + let m = i64::decode(decoder)?; + let n = u64::decode(decoder)?; + let o = i64::decode(decoder)?; + let s = Option::::decode(decoder)?; + let b = TokenAmount::decode(decoder)?; + let min_value = Option::::decode(decoder)?; + let max_value = Option::::decode(decoder)?; + Ok(Self::Polynomial { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + }) } 6 => { - let a = decode_not_nan(decoder)?; - let b = 
decode_not_nan(decoder)?; - let c = SignedTokenAmount::decode(decoder)?; - Ok(Self::Exponential { a, b, c }) + let a = u64::decode(decoder)?; + let d = u64::decode(decoder)?; + let m = i64::decode(decoder)?; + let n = u64::decode(decoder)?; + let o = i64::decode(decoder)?; + let s = Option::::decode(decoder)?; + let c = TokenAmount::decode(decoder)?; + let min_value = Option::::decode(decoder)?; + let max_value = Option::::decode(decoder)?; + Ok(Self::Exponential { + a, + d, + m, + n, + o, + s, + c, + min_value, + max_value, + }) } 7 => { - let a = decode_not_nan(decoder)?; - let b = decode_not_nan(decoder)?; - let c = SignedTokenAmount::decode(decoder)?; - Ok(Self::Logarithmic { a, b, c }) + let a = i64::decode(decoder)?; + let d = u64::decode(decoder)?; + let m = u64::decode(decoder)?; + let n = u64::decode(decoder)?; + let o = i64::decode(decoder)?; + let s = Option::::decode(decoder)?; + let b = TokenAmount::decode(decoder)?; + let min_value = Option::::decode(decoder)?; + let max_value = Option::::decode(decoder)?; + Ok(Self::Logarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + }) } 8 => { - let steps = Vec::<(u64, TokenAmount)>::decode(decoder)?; - Ok(Self::Stepwise(steps)) + let a = i64::decode(decoder)?; + let d = u64::decode(decoder)?; + let m = u64::decode(decoder)?; + let n = u64::decode(decoder)?; + let o = i64::decode(decoder)?; + let s = Option::::decode(decoder)?; + let b = TokenAmount::decode(decoder)?; + let min_value = Option::::decode(decoder)?; + let max_value = Option::::decode(decoder)?; + Ok(Self::InvertedLogarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + }) } _ => Err(bincode::error::DecodeError::OtherString( "Invalid variant".into(), @@ -141,7 +293,6 @@ impl Decode for DistributionFunction { } } -// Implement BorrowDecode for DistributionFunction impl<'de> BorrowDecode<'de> for DistributionFunction { fn borrow_decode>( decoder: &mut D, @@ -150,55 +301,136 @@ impl<'de> BorrowDecode<'de> for 
DistributionFunction { match variant { 0 => { let n = TokenAmount::borrow_decode(decoder)?; - Ok(Self::FixedAmount { n }) + Ok(Self::FixedAmount { amount: n }) } 1 => { - let step_count = u64::borrow_decode(decoder)?; - let decrease_per_interval = decode_not_nan(decoder)?; + let min = TokenAmount::borrow_decode(decoder)?; + let max = TokenAmount::borrow_decode(decoder)?; + Ok(Self::Random { min, max }) + } + 2 => { + let step_count = u32::borrow_decode(decoder)?; + let decrease_per_interval_numerator = u16::borrow_decode(decoder)?; + let decrease_per_interval_denominator = u16::borrow_decode(decoder)?; + let s = Option::::borrow_decode(decoder)?; let n = TokenAmount::borrow_decode(decoder)?; + let min_value = Option::::borrow_decode(decoder)?; Ok(Self::StepDecreasingAmount { step_count, - decrease_per_interval, + decrease_per_interval_numerator, + decrease_per_interval_denominator, + s, n, + min_value, }) } - 2 => { - let a = i64::borrow_decode(decoder)?; - let b = SignedTokenAmount::borrow_decode(decoder)?; - Ok(Self::LinearInteger { a, b }) - } 3 => { - let a = decode_not_nan(decoder)?; - let b = SignedTokenAmount::borrow_decode(decoder)?; - Ok(Self::LinearFloat { a, b }) + let steps = BTreeMap::::borrow_decode(decoder)?; + Ok(Self::Stepwise(steps)) } 4 => { let a = i64::borrow_decode(decoder)?; - let n = i64::borrow_decode(decoder)?; - let b = SignedTokenAmount::borrow_decode(decoder)?; - Ok(Self::PolynomialInteger { a, n, b }) + let d = u64::borrow_decode(decoder)?; + let s = Option::::borrow_decode(decoder)?; + let b = TokenAmount::borrow_decode(decoder)?; + let min_value = Option::::borrow_decode(decoder)?; + let max_value = Option::::borrow_decode(decoder)?; + Ok(Self::Linear { + a, + d, + s, + b, + min_value, + max_value, + }) } 5 => { - let a = decode_not_nan(decoder)?; - let n = decode_not_nan(decoder)?; - let b = SignedTokenAmount::borrow_decode(decoder)?; - Ok(Self::PolynomialFloat { a, n, b }) + let a = i64::borrow_decode(decoder)?; + let d = 
u64::borrow_decode(decoder)?; + let m = i64::borrow_decode(decoder)?; + let n = u64::borrow_decode(decoder)?; + let o = i64::borrow_decode(decoder)?; + let s = Option::::borrow_decode(decoder)?; + let b = TokenAmount::borrow_decode(decoder)?; + let min_value = Option::::borrow_decode(decoder)?; + let max_value = Option::::borrow_decode(decoder)?; + Ok(Self::Polynomial { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + }) } 6 => { - let a = decode_not_nan(decoder)?; - let b = decode_not_nan(decoder)?; - let c = SignedTokenAmount::borrow_decode(decoder)?; - Ok(Self::Exponential { a, b, c }) + let a = u64::borrow_decode(decoder)?; + let d = u64::borrow_decode(decoder)?; + let m = i64::borrow_decode(decoder)?; + let n = u64::borrow_decode(decoder)?; + let o = i64::borrow_decode(decoder)?; + let s = Option::::borrow_decode(decoder)?; + let c = TokenAmount::borrow_decode(decoder)?; + let min_value = Option::::borrow_decode(decoder)?; + let max_value = Option::::borrow_decode(decoder)?; + Ok(Self::Exponential { + a, + d, + m, + n, + o, + s, + c, + min_value, + max_value, + }) } 7 => { - let a = decode_not_nan(decoder)?; - let b = decode_not_nan(decoder)?; - let c = SignedTokenAmount::borrow_decode(decoder)?; - Ok(Self::Logarithmic { a, b, c }) + let a = i64::borrow_decode(decoder)?; + let d = u64::borrow_decode(decoder)?; + let m = u64::borrow_decode(decoder)?; + let n = u64::borrow_decode(decoder)?; + let o = i64::borrow_decode(decoder)?; + let s = Option::::borrow_decode(decoder)?; + let b = TokenAmount::borrow_decode(decoder)?; + let min_value = Option::::borrow_decode(decoder)?; + let max_value = Option::::borrow_decode(decoder)?; + Ok(Self::Logarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + }) } 8 => { - let steps = Vec::<(u64, TokenAmount)>::borrow_decode(decoder)?; - Ok(Self::Stepwise(steps)) + let a = i64::borrow_decode(decoder)?; + let d = u64::borrow_decode(decoder)?; + let m = u64::borrow_decode(decoder)?; + let n = 
u64::borrow_decode(decoder)?; + let o = i64::borrow_decode(decoder)?; + let s = Option::::borrow_decode(decoder)?; + let b = TokenAmount::borrow_decode(decoder)?; + let min_value = Option::::borrow_decode(decoder)?; + let max_value = Option::::borrow_decode(decoder)?; + Ok(Self::InvertedLogarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + }) } _ => Err(bincode::error::DecodeError::OtherString( "Invalid variant".into(), diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/evaluate.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/evaluate.rs new file mode 100644 index 00000000000..4ad0c6107cc --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/evaluate.rs @@ -0,0 +1,1317 @@ +use crate::balances::credits::TokenAmount; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; +use crate::ProtocolError; +// adjust the import path as needed + +impl DistributionFunction { + /// Evaluates the distribution function at the given period `x`. + /// + /// If an optional start period (`s`) is not provided, it defaults to 0. + /// + /// # Returns + /// A `Result` with the computed token amount or a `ProtocolError` in case of a + /// divide-by-zero, undefined operation (e.g. log of non-positive number), or overflow. + pub fn evaluate(&self, x: u64) -> Result { + match self { + DistributionFunction::FixedAmount { amount: n } => { + // For fixed amount, simply return n. + Ok(*n) + } + DistributionFunction::Random { min, max } => { + // Ensure that min is not greater than max. + if *min > *max { + return Err(ProtocolError::Overflow( + "Random: min must be less than or equal to max".into(), + )); + } + + // Use x (the period) as the seed for the PRF. + let seed = x; + // A simple SplitMix64-based PRF. 
+ let mut z = seed.wrapping_add(0x9E3779B97F4A7C15); + z = (z ^ (z >> 30)).wrapping_mul(0xBF58476D1CE4E5B9); + z = (z ^ (z >> 27)).wrapping_mul(0x94D049BB133111EB); + z = z ^ (z >> 31); + + // Calculate the range size: (max - min + 1) + let range = max.wrapping_sub(*min).wrapping_add(1); + + // Map the pseudorandom number into the desired range. + let value = min.wrapping_add(z % range); + + Ok(value) + } + + DistributionFunction::StepDecreasingAmount { + step_count, + decrease_per_interval_numerator, + decrease_per_interval_denominator, + s, + n, + min_value, + } => { + // Check for division by zero in the denominator: + if *decrease_per_interval_denominator == 0 { + return Err(ProtocolError::DivideByZero( + "StepDecreasingAmount: denominator is 0", + )); + } + let s_val = s.unwrap_or(0); + // Compute the number of steps passed. + let steps = if x > s_val { + (x - s_val) / (*step_count as u64) + } else { + 0 + }; + let reduction = 1.0 + - ((*decrease_per_interval_numerator as f64) + / (*decrease_per_interval_denominator as f64)); + let factor = reduction.powf(steps as f64); + let result = (*n as f64) * factor; + // Clamp to min_value if provided. + let clamped = if let Some(min) = min_value { + result.max(*min as f64) + } else { + result + }; + if !clamped.is_finite() || clamped > (u64::MAX as f64) || clamped < 0.0 { + return Err(ProtocolError::Overflow( + "StepDecreasingAmount evaluation overflow or negative", + )); + } + Ok(clamped as TokenAmount) + } + + DistributionFunction::Stepwise(steps) => { + // Return the emission corresponding to the greatest key <= x. + Ok(steps + .range(..=x) + .next_back() + .map(|(_, amount)| *amount) + .unwrap_or(0)) + } + // f(x) = (a * (x - s) / d) + b + DistributionFunction::Linear { + a, + d, + s, + b, + min_value, + max_value, + } => { + if *d == 0 { + return Err(ProtocolError::DivideByZero( + "Linear function: divisor d is 0", + )); + } + // Check that the value at x = 0 is within bounds. 
+ let s_val = s.unwrap_or(0); + + let diff = x.saturating_sub(s_val) as i128; + let value = (((*a as i128) * diff / (*d as i128)) as i64) + .checked_add(*b as i64) + .ok_or(ProtocolError::Overflow( + "Linear function evaluation overflow or negative", + ))?; + + let value = if value < 0 { 0 } else { value as u64 }; + if let Some(min_value) = min_value { + if value < *min_value { + return Ok(*min_value); + } + } + + if let Some(max_value) = max_value { + if value > *max_value { + return Ok(*max_value); + } + } + Ok(value as TokenAmount) + } + // f(x) = (a * (x - s + o)^(m/n)) / d + b + DistributionFunction::Polynomial { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + if *d == 0 { + return Err(ProtocolError::DivideByZero( + "Polynomial function: divisor d is 0", + )); + } + if *n == 0 { + return Err(ProtocolError::DivideByZero( + "Polynomial function: exponent denominator n is 0", + )); + } + let s_val = s.unwrap_or(0); + let exponent = (*m as f64) / (*n as f64); + let diff = x as i128 - s_val as i128 + *o as i128; + + if diff < 0 { + return Err(ProtocolError::Overflow( + "Polynomial function: argument is non-positive".into(), + )); + } + + if diff > u64::MAX as i128 { + return Err(ProtocolError::Overflow( + "Polynomial function: argument is too big (max should be u64::MAX)".into(), + )); + } + + let diff_exp = (diff as f64).powf(exponent); + + if !diff_exp.is_finite() || diff_exp.abs() > (u64::MAX as f64) { + return Err(ProtocolError::Overflow( + "Polynomial function evaluation overflow or negative", + )); + } + + let pol = diff_exp as i128; + + let value = (((*a as i128) * pol / (*d as i128)) as i64) + .checked_add(*b as i64) + .ok_or(ProtocolError::Overflow( + "Polynomial function evaluation overflow or negative", + ))?; + + let value = if value < 0 { 0 } else { value as u64 }; + + if let Some(min_value) = min_value { + if value < *min_value { + return Ok(*min_value); + } + } + if let Some(max_value) = max_value { + if value > *max_value { + 
return Ok(*max_value); + } + } + Ok(value) + } + + DistributionFunction::Exponential { + a, + d, + m, + n, + o, + s, + c, + min_value, + max_value, + } => { + if *d == 0 { + return Err(ProtocolError::DivideByZero( + "Exponential function: divisor d is 0", + )); + } + if *n == 0 { + return Err(ProtocolError::DivideByZero( + "Exponential function: exponent denominator n is 0", + )); + } + let s_val = s.unwrap_or(0); + let diff = x as i128 - s_val as i128 + *o as i128; + + if diff < -(u64::MAX as i128) { + return Err(ProtocolError::Overflow( + "Exponential function: argument is too small (min should be -u64::MAX)" + .into(), + )); + } + + if diff > u64::MAX as i128 { + return Err(ProtocolError::Overflow( + "Exponential function: argument is too big (max should be u64::MAX)".into(), + )); + } + + let exponent = (*m as f64) * (diff as f64) / (*n as f64); + let value = ((*a as f64) * exponent.exp() / (*d as f64)) + (*c as f64); + if let Some(max_value) = max_value { + if value.is_infinite() && value.is_sign_positive() || value > *max_value as f64 + { + return Ok(*max_value); + } + } + if !value.is_finite() || value > (u64::MAX as f64) { + return Err(ProtocolError::Overflow( + "Exponential function evaluation overflow or negative", + )); + } + + if value < 0.0 { + return if let Some(min_value) = min_value { + Ok(*min_value) + } else { + Ok(0) + }; + } + + let value_u64 = value as u64; + if let Some(min_value) = min_value { + if value_u64 < *min_value { + return Ok(*min_value); + } + } + Ok(value_u64) + } + + DistributionFunction::Logarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + if *d == 0 { + return Err(ProtocolError::DivideByZero( + "Logarithmic function: divisor d is 0", + )); + } + if *n == 0 { + return Err(ProtocolError::DivideByZero("Logarithmic function: n is 0")); + } + let s_val = s.unwrap_or(0); + let diff = x as i128 - s_val as i128 + *o as i128; + + if diff <= 0 { + return Err(ProtocolError::Overflow( + "Logarithmic 
function: argument for log is non-positive".into(), + )); + } + + if diff > u64::MAX as i128 { + return Err(ProtocolError::Overflow("Logarithmic function: argument for log is too big (max should be u64::MAX)".into())); + } + + let argument = (*m as f64) * (diff as f64) / (*n as f64); + + let log_val = argument.ln(); + let value = ((*a as f64) * log_val / (*d as f64)) + (*b as f64); + if let Some(max_value) = max_value { + if value.is_infinite() && value.is_sign_positive() || value > *max_value as f64 + { + return Ok(*max_value); + } + } + if !value.is_finite() || value > (u64::MAX as f64) { + return Err(ProtocolError::Overflow( + "Logarithmic function evaluation overflow or negative", + )); + } + if value < 0.0 { + return if let Some(min_value) = min_value { + Ok(*min_value) + } else { + Ok(0) + }; + } + let value_u64 = value as u64; + if let Some(min_value) = min_value { + if value_u64 < *min_value { + return Ok(*min_value); + } + } + Ok(value_u64) + } + DistributionFunction::InvertedLogarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + // Check for division-by-zero: d, n, and m must be non-zero. + if *d == 0 { + return Err(ProtocolError::DivideByZero( + "InvertedLogarithmic: divisor d is 0".into(), + )); + } + if *n == 0 { + return Err(ProtocolError::DivideByZero( + "InvertedLogarithmic: parameter n is 0".into(), + )); + } + if *m == 0 { + return Err(ProtocolError::DivideByZero( + "InvertedLogarithmic: parameter m is 0".into(), + )); + } + + // Use the provided start period or default to 0. + let s_val = s.unwrap_or(0); + + // Compute the adjusted time difference: (x - s + o). + // We use i128 to prevent overflow issues. + let diff = x as i128 - s_val as i128 + *o as i128; + + // For the inverted logarithmic formula f(x) = (a * ln(n / (m * (x - s + o)))) / d + b, + // the denominator inside the log must be positive. 
+ if diff <= 0 { + return Err(ProtocolError::Overflow( + "InvertedLogarithmic: (x - s + o) must be > 0".into(), + )); + } + + // Calculate the denominator for the logarithm: m * (x - s + o) + let denom_f = (*m as f64) * (diff as f64); + if denom_f <= 0.0 { + return Err(ProtocolError::Overflow( + "InvertedLogarithmic: computed denominator is non-positive".into(), + )); + } + + // Compute the logarithm argument: n / (m * (x - s + o)) + let argument = (*n as f64) / denom_f; + if argument <= 0.0 { + return Err(ProtocolError::Overflow( + "InvertedLogarithmic: log argument is non-positive".into(), + )); + } + + let log_val = argument.ln(); + + // Compute the final value: (a * ln(...)) / d + b. + let value = ((*a as f64) * log_val / (*d as f64)) + (*b as f64); + + // Clamp to max_value if provided. + if let Some(max_value) = max_value { + if value > *max_value as f64 + || (value.is_infinite() && value.is_sign_positive()) + { + return Ok(*max_value); + } + } + + // Ensure the computed value is finite and within the u64 range. + if !value.is_finite() || value > (u64::MAX as f64) { + return Err(ProtocolError::Overflow( + "InvertedLogarithmic: evaluation overflow".into(), + )); + } + + if value < 0.0 { + return if let Some(min_value) = min_value { + Ok(*min_value) + } else { + Ok(0) + }; + } + + let value_u64 = value as u64; + + // Clamp to min_value if provided. 
+ if let Some(min_value) = min_value { + if value_u64 < *min_value { + return Ok(*min_value); + } + } + Ok(value_u64) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::BTreeMap; + + #[test] + fn test_fixed_amount() { + let distribution = DistributionFunction::FixedAmount { amount: 100 }; + assert_eq!(distribution.evaluate(0).unwrap(), 100); + assert_eq!(distribution.evaluate(50).unwrap(), 100); + assert_eq!(distribution.evaluate(1000).unwrap(), 100); + } + + #[test] + fn test_stepwise_emission() { + let mut steps = BTreeMap::new(); + steps.insert(0, 100); + steps.insert(10, 50); + steps.insert(20, 25); + + let distribution = DistributionFunction::Stepwise(steps); + assert_eq!(distribution.evaluate(0).unwrap(), 100); + assert_eq!(distribution.evaluate(5).unwrap(), 100); + assert_eq!(distribution.evaluate(10).unwrap(), 50); + assert_eq!(distribution.evaluate(15).unwrap(), 50); + assert_eq!(distribution.evaluate(20).unwrap(), 25); + assert_eq!(distribution.evaluate(30).unwrap(), 25); + } + + #[test] + fn test_step_decreasing_amount() { + let distribution = DistributionFunction::StepDecreasingAmount { + step_count: 10, + decrease_per_interval_numerator: 1, + decrease_per_interval_denominator: 2, // 50% reduction per step + s: Some(0), + n: 100, + min_value: Some(10), + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 100); + assert_eq!(distribution.evaluate(9).unwrap(), 100); + assert_eq!(distribution.evaluate(10).unwrap(), 50); + assert_eq!(distribution.evaluate(20).unwrap(), 25); + assert_eq!(distribution.evaluate(30).unwrap(), 12); + assert_eq!(distribution.evaluate(40).unwrap(), 10); // Should not go below min_value + } + + #[test] + fn test_step_decreasing_amount_divide_by_zero() { + let distribution = DistributionFunction::StepDecreasingAmount { + step_count: 10, + decrease_per_interval_numerator: 1, + decrease_per_interval_denominator: 0, // Invalid denominator + s: Some(0), + n: 100, + min_value: Some(10), + }; + + 
assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + mod random { + use super::*; + + #[test] + fn test_random_distribution_with_valid_range() { + let distribution = DistributionFunction::Random { min: 10, max: 100 }; + + for x in 0..100 { + let result = distribution.evaluate(x).unwrap(); + assert!( + (10..=100).contains(&result), + "Random value {} is out of range for x = {}", + result, + x + ); + } + } + + #[test] + fn test_random_distribution_with_single_value_range() { + let distribution = DistributionFunction::Random { min: 42, max: 42 }; + + for x in 0..10 { + let result = distribution.evaluate(x).unwrap(); + assert_eq!( + result, 42, + "Expected fixed output 42, got {} for x = {}", + result, x + ); + } + } + + #[test] + fn test_random_distribution_invalid_range() { + let distribution = DistributionFunction::Random { min: 50, max: 40 }; + + let result = distribution.evaluate(0); + assert!( + matches!(result, Err(ProtocolError::Overflow(_))), + "Expected ProtocolError::Overflow but got {:?}", + result + ); + } + + #[test] + fn test_random_distribution_deterministic_for_same_x() { + let distribution = DistributionFunction::Random { min: 10, max: 100 }; + + let value1 = distribution.evaluate(42).unwrap(); + let value2 = distribution.evaluate(42).unwrap(); + + assert_eq!( + value1, value2, + "Random distribution should be deterministic for the same x" + ); + } + + #[test] + fn test_random_distribution_varies_for_different_x() { + let distribution = DistributionFunction::Random { min: 10, max: 100 }; + + let value1 = distribution.evaluate(1).unwrap(); + let value2 = distribution.evaluate(2).unwrap(); + + assert_ne!( + value1, value2, + "Random distribution should vary for different x values" + ); + } + } + mod linear { + use super::*; + #[test] + fn test_linear_function_increasing() { + let distribution = DistributionFunction::Linear { + a: 10, + d: 2, + s: Some(0), + b: 50, + min_value: None, + max_value: None, + }; + + 
assert_eq!(distribution.evaluate(0).unwrap(), 50); + assert_eq!(distribution.evaluate(2).unwrap(), 60); + assert_eq!(distribution.evaluate(4).unwrap(), 70); + assert_eq!(distribution.evaluate(6).unwrap(), 80); + } + + #[test] + fn test_linear_function_decreasing() { + let distribution = DistributionFunction::Linear { + a: -5, + d: 1, + s: Some(0), + b: 100, + min_value: Some(10), + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 100); + assert_eq!(distribution.evaluate(10).unwrap(), 50); + assert_eq!(distribution.evaluate(20).unwrap(), 10); // Should not go below min_value + } + + #[test] + fn test_linear_function_divide_by_zero() { + let distribution = DistributionFunction::Linear { + a: 10, + d: 0, // Invalid denominator + s: Some(0), + b: 50, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + } + mod polynomial { + use super::*; + #[test] + fn test_polynomial_function() { + let distribution = DistributionFunction::Polynomial { + a: 2, + d: 1, + m: 2, + n: 1, + o: 0, + s: Some(0), + b: 10, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 10); + assert_eq!(distribution.evaluate(2).unwrap(), 18); + assert_eq!(distribution.evaluate(3).unwrap(), 28); + assert_eq!(distribution.evaluate(4).unwrap(), 42); + } + + #[test] + fn test_polynomial_function_overflow() { + let distribution = DistributionFunction::Polynomial { + a: i64::MAX, + d: 1, + m: 2, + n: 1, + o: 0, + s: Some(0), + b: 10, + min_value: None, + max_value: None, + }; + + let result = distribution.evaluate(1); + assert!( + matches!(result, Err(ProtocolError::Overflow(_))), + "Expected overflow but got {:?}", + result + ); + } + + // Test: Fractional exponent (exponent = 3/2) + #[test] + fn test_polynomial_function_fraction_exponent() { + let distribution = DistributionFunction::Polynomial { + a: 1, + d: 1, + m: 3, // exponent is 3/2 + n: 2, + o: 0, + 
s: Some(0), + b: 0, + min_value: None, + max_value: None, + }; + // (4 - 0 + 0)^(3/2) = 4^(3/2) = (sqrt(4))^3 = 2^3 = 8. + assert_eq!(distribution.evaluate(4).unwrap(), 8); + } + + // Test: Negative coefficient a (should flip the sign) + #[test] + fn test_polynomial_function_negative_a() { + let distribution = DistributionFunction::Polynomial { + a: -1, + d: 1, + m: 2, + n: 1, + o: 0, + s: Some(0), + b: 0, + min_value: None, + max_value: None, + }; + // f(x) = -1 * (x^2). For x = 3: -1 * (3^2) = -9. + assert_eq!(distribution.evaluate(3).unwrap(), 0); + } + + // Test: Non-zero shift parameter s (shifting the x coordinate) + #[test] + fn test_polynomial_function_with_shift() { + let distribution = DistributionFunction::Polynomial { + a: 2, + d: 1, + m: 2, + n: 1, + o: 0, + s: Some(2), + b: 10, + min_value: None, + max_value: None, + }; + // f(x) = 2 * ((x - 2)^2) + 10. + // At x = 2: (0)^2 = 0, f(2) = 10. + assert_eq!(distribution.evaluate(2).unwrap(), 10); + // At x = 3: (3 - 2)^2 = 1, f(3) = 2*1 + 10 = 12. + assert_eq!(distribution.evaluate(3).unwrap(), 12); + } + + // Test: Non-zero offset o (shifting the base of the power) + #[test] + fn test_polynomial_function_with_offset() { + let distribution = DistributionFunction::Polynomial { + a: 2, + d: 1, + m: 2, + n: 1, + o: 3, + s: Some(0), + b: 10, + min_value: None, + max_value: None, + }; + // f(x) = 2 * ((x - 0 + 3)^2) + 10. + // At x = 1: (1 + 3) = 4, 4^2 = 16, then 2*16 + 10 = 42. + assert_eq!(distribution.evaluate(1).unwrap(), 42); + } + + // Test: Constant function when m = 0 (should ignore x) + #[test] + fn test_polynomial_function_constant() { + let distribution = DistributionFunction::Polynomial { + a: 5, + d: 1, + m: 0, // exponent 0 => (x-s+o)^0 = 1 (for any x where x-s+o ≠ 0) + n: 1, + o: 0, + s: Some(0), + b: 3, + min_value: None, + max_value: None, + }; + // f(x) = 5*1 + 3 = 8 for any x. 
+ for x in [0, 10, 100].iter() { + assert_eq!(distribution.evaluate(*x).unwrap(), 8); + } + } + + // Test: Linear function when exponent is 1 (m = 1, n = 1) + #[test] + fn test_polynomial_function_linear() { + let distribution = DistributionFunction::Polynomial { + a: 3, + d: 1, + m: 1, + n: 1, + o: 0, + s: Some(0), + b: 5, + min_value: None, + max_value: None, + }; + // f(x) = 3*x + 5. At x = 10, f(10) = 30 + 5 = 35. + assert_eq!(distribution.evaluate(10).unwrap(), 35); + } + + // Test: Cubic function (m = 3, n = 1) + #[test] + fn test_polynomial_function_cubic() { + let distribution = DistributionFunction::Polynomial { + a: 1, + d: 1, + m: 3, + n: 1, + o: 0, + s: Some(0), + b: 0, + min_value: None, + max_value: None, + }; + // f(x) = x^3. At x = 4, f(4) = 64. + assert_eq!(distribution.evaluate(4).unwrap(), 64); + } + + // Test: Combination of non-zero offset and shift + #[test] + fn test_polynomial_function_with_offset_and_shift() { + let distribution = DistributionFunction::Polynomial { + a: 1, + d: 1, + m: 2, + n: 1, + o: 2, + s: Some(1), + b: 0, + min_value: None, + max_value: None, + }; + // f(x) = ( (x - 1 + 2)^2 ). + // At x = 3: (3 - 1 + 2) = 4, and 4^2 = 16. 
+ assert_eq!(distribution.evaluate(3).unwrap(), 16); + } + } + mod exp { + use super::*; + #[test] + fn test_exponential_function() { + let distribution = DistributionFunction::Exponential { + a: 1, + d: 1, + m: 1, + n: 1, + o: 0, + s: Some(0), + c: 10, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 11); + assert!(distribution.evaluate(10).unwrap() > 20); + } + + #[test] + fn test_exponential_function_divide_by_zero() { + let distribution = DistributionFunction::Exponential { + a: 1, + d: 0, // Invalid denominator + m: 1, + n: 1, + o: 0, + s: Some(0), + c: 10, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + + #[test] + fn test_exponential_function_basic() { + let distribution = DistributionFunction::Exponential { + a: 2, + d: 1, + m: 1, + n: 1, + o: 0, + s: Some(0), + c: 5, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 7); + assert_eq!(distribution.evaluate(5).unwrap(), 301); + assert_eq!(distribution.evaluate(10).unwrap(), 44057); + } + + #[test] + fn test_exponential_function_slow_growth() { + let distribution = DistributionFunction::Exponential { + a: 1, + d: 10, + m: 1, + n: 10, + o: 0, + s: Some(0), + c: 0, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 0); + assert_eq!(distribution.evaluate(50).unwrap(), 14); + assert_eq!(distribution.evaluate(100).unwrap(), 2202); + } + + #[test] + fn test_exponential_function_rapid_growth() { + let distribution = DistributionFunction::Exponential { + a: 1, + d: 1, + m: 4, + n: 1, + o: 0, + s: Some(0), + c: 0, + min_value: None, + max_value: Some(100000000), + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 1); + assert_eq!(distribution.evaluate(2).unwrap(), 2980); + assert_eq!(distribution.evaluate(4).unwrap(), 8886110); + assert_eq!(distribution.evaluate(10).unwrap(), 100000000); + 
assert_eq!(distribution.evaluate(100000).unwrap(), 100000000); + } + + #[test] + fn test_exponential_function_with_no_min_value() { + let distribution = DistributionFunction::Exponential { + a: 2, + d: 1, + m: -1, + n: 1, + o: 0, + s: Some(0), + c: 10, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 12); // f(0) = (2 * e^(-1 * (0 - 0 + 0) / 1)) / 1 + 10 + assert_eq!(distribution.evaluate(5).unwrap(), 10); + assert_eq!(distribution.evaluate(10000).unwrap(), 10); + } + + #[test] + fn test_exponential_function_with_min_value() { + let distribution = DistributionFunction::Exponential { + a: 2, + d: 1, + m: -1, + n: 1, + o: 0, + s: Some(0), + c: 10, + min_value: Some(11), + max_value: None, + }; + + assert_eq!(distribution.evaluate(0).unwrap(), 12); // f(0) = (2 * e^(-1 * (0 - 0 + 0) / 1)) / 1 + 10 + assert_eq!(distribution.evaluate(5).unwrap(), 11); + assert_eq!(distribution.evaluate(100).unwrap(), 11); + } + + #[test] + fn test_exponential_function_starting_at_max() { + let distribution = DistributionFunction::Exponential { + a: 2, + d: 1, + m: 1, + n: 2, + o: 0, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(11), // Set max at the starting value + }; + + assert_eq!( + distribution.evaluate(0).unwrap(), + 11, + "Function should start at the max value" + ); + assert_eq!( + distribution.evaluate(5).unwrap(), + 11, + "Function should be clamped at max value" + ); + } + + #[test] + fn test_exponential_function_large_x_overflow() { + let distribution = DistributionFunction::Exponential { + a: 2, + d: 1, + m: 1, + n: 10, + o: 0, + s: Some(0), + c: 5, + min_value: None, + max_value: None, + }; + + let result = distribution.evaluate(100000); + assert!( + matches!(result, Err(ProtocolError::Overflow(_))), + "Expected overflow but got {:?}", + result + ); + } + } + mod log { + use super::*; + #[test] + fn test_logarithmic_function() { + let distribution = DistributionFunction::Logarithmic { + a: 10, + d: 1, + m: 1, + 
n: 1, + o: 1, // Offset ensures (x - s + o) > 0 + s: Some(1), // Start at x=1 to avoid log(0) + b: 5, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(1).unwrap(), 5); + assert!(distribution.evaluate(10).unwrap() > 5); + } + + #[test] + fn test_logarithmic_function_with_min_max_bounds() { + let distribution = DistributionFunction::Logarithmic { + a: 10, + d: 1, + m: 1, + n: 1, + o: 1, + s: Some(1), + b: 5, + min_value: Some(7), // Minimum bound should be enforced + max_value: Some(20), // Maximum bound should be enforced + }; + + assert_eq!(distribution.evaluate(1).unwrap(), 7); // Clamped to min_value + assert!(distribution.evaluate(10).unwrap() <= 20); // Should not exceed max_value + } + + #[test] + fn test_logarithmic_function_undefined() { + let distribution = DistributionFunction::Logarithmic { + a: 10, + d: 1, + m: 1, + n: 1, + o: -1, // Invalid offset causing log(0) + s: Some(1), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(1), + Err(ProtocolError::Overflow(_)) + )); + } + + #[test] + fn test_logarithmic_function_large_x() { + let distribution = DistributionFunction::Logarithmic { + a: 100, + d: 2, + m: 1, + n: 1, + o: 5, + s: Some(10), + b: 10, + min_value: None, + max_value: None, + }; + + let result = distribution.evaluate(100); + assert!(result.is_ok()); + assert!(result.unwrap() > 10); // Function should increase over time + } + + #[test] + fn test_logarithmic_function_divide_by_zero_d() { + let distribution = DistributionFunction::Logarithmic { + a: 10, + d: 0, // Invalid: Division by zero + m: 1, + n: 1, + o: 1, + s: Some(5), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + + #[test] + fn test_logarithmic_function_divide_by_zero_n() { + let distribution = DistributionFunction::Logarithmic { + a: 10, + d: 1, + m: 1, + n: 0, // Invalid: Division by zero in log 
denominator + o: 1, + s: Some(5), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + } + mod inverted_log { + use super::*; + #[test] + fn test_inverted_logarithmic_basic_decreasing() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: 10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(distribution.evaluate(1).unwrap() > distribution.evaluate(5).unwrap()); + assert!(distribution.evaluate(5).unwrap() > distribution.evaluate(10).unwrap()); + } + + #[test] + fn test_inverted_logarithmic_basic_increasing() { + // f(x) = (-10 * log( 1000 / (x + 10) )) + 5 + let distribution = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 1000, + o: 10, + s: Some(0), + b: 5, + min_value: None, + max_value: None, + }; + + let val1000 = distribution.evaluate(1000).unwrap(); + let val2000 = distribution.evaluate(2000).unwrap(); + let val3000 = distribution.evaluate(3000).unwrap(); + + assert!(val1000 < val2000, "Function should be increasing"); + assert!(val2000 < val3000, "Function should be increasing"); + } + + #[test] + fn test_inverted_logarithmic_negative_clamped_to_0() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: None, + max_value: None, + }; + + assert_eq!(distribution.evaluate(1).unwrap(), 0); // Should be clamped to 0 + } + + #[test] + fn test_inverted_logarithmic_clamped_by_min_value() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: 10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(7), + max_value: None, + }; + + assert_eq!(distribution.evaluate(1000).unwrap(), 7); // Should be clamped to min_value + } + + #[test] + fn test_inverted_logarithmic_clamped_by_max_value() { + // f(x) = (-10 * log( 100 / (x + 1) )) + 5 + let 
distribution = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: None, + max_value: Some(20), + }; + + assert_eq!(distribution.evaluate(500).unwrap(), 20); // Should be clamped to max_value + } + + #[test] + fn test_inverted_logarithmic_undefined_log_argument_zero() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: 10, + d: 1, + m: 1, + n: 100, + o: -1, + s: Some(1), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(1), + Err(ProtocolError::Overflow(_)) + )); + } + + #[test] + fn test_inverted_logarithmic_divide_by_zero_n() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: 10, + d: 1, + m: 1, + n: 0, // Invalid: n must not be zero + o: 1, + s: Some(5), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + + #[test] + fn test_inverted_logarithmic_divide_by_zero_d() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: 10, + d: 0, // Invalid: d must not be zero + m: 1, + n: 1, + o: 1, + s: Some(5), + b: 5, + min_value: None, + max_value: None, + }; + + assert!(matches!( + distribution.evaluate(10), + Err(ProtocolError::DivideByZero(_)) + )); + } + + #[test] + fn test_inverted_logarithmic_increasing_starts_at_min_value() { + let distribution = DistributionFunction::InvertedLogarithmic { + a: -10, // Increasing function + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(10), // Max value set at the starting point + }; + + assert_eq!( + distribution.evaluate(0).unwrap(), + 1, + "Function should start at the max value" + ); + assert_eq!( + distribution.evaluate(200).unwrap(), + 10, + "Function should remain clamped at max value" + ); + } + + #[test] + fn test_inverted_logarithmic_starts_at_min_value() { + let distribution = 
DistributionFunction::InvertedLogarithmic { + a: 10, // Decreasing function + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(3), + max_value: None, + }; + + assert_eq!( + distribution.evaluate(1000).unwrap(), + 3, + "Function should remain clamped at min value" + ); + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/evaluate_interval.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/evaluate_interval.rs new file mode 100644 index 00000000000..ce0d595ab0c --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/evaluate_interval.rs @@ -0,0 +1,186 @@ +use crate::balances::credits::TokenAmount; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::ProtocolError; + +impl DistributionFunction { + /// Evaluates the total token emission over a specified interval. + /// + /// This function calculates the sum of token emissions at discrete points between + /// `start_not_included` (exclusive) and `end_included` (inclusive), using `step` as the + /// interval between evaluations. Each evaluation is performed by calling `self.evaluate(x)` + /// at the appropriate step values. + /// + /// # Parameters + /// + /// - `start_not_included` (`u64`): + /// The block height after which emissions are considered (exclusive). + /// - `step` (`u64`): + /// The interval in blocks at which emissions occur (e.g., every 5 blocks). + /// **Must be greater than zero** to avoid division-by-zero errors. + /// - `end_included` (`u64`): + /// The final block height at which emissions should be considered (inclusive). 
+ /// + /// # Behavior + /// + /// Given that emissions occur at regular block intervals, this function iterates through + /// block heights that are spaced by `step`, beginning at `start_not_included + step` and + /// stopping at or before `end_included`. + /// The token emissions at each step are retrieved using `self.evaluate(x)`, and their sum + /// is returned. + /// + /// # Returns + /// + /// - `Ok(TokenAmount)`: The total sum of emissions over the interval. + /// - `Err(ProtocolError)`: If an evaluation results in an error, such as an overflow or + /// invalid operation. + /// + /// # Errors + /// + /// - `ProtocolError::DivideByZero`: If `step` is zero. + /// - `ProtocolError::Overflow`: If the accumulated sum exceeds the maximum allowable value. + /// - Any error that `self.evaluate(x)` might return. + /// + pub fn evaluate_interval( + &self, + start_not_included: u64, + step: u64, + end_included: u64, + ) -> Result { + if step == 0 { + return Err(ProtocolError::DivideByZero( + "evaluate_interval: step cannot be zero".into(), + )); + } + if end_included <= start_not_included { + return Ok(0); + } + + let mut total: u64 = 0; + // Begin at the first period after start_not_included by adding 'step'. + let mut x = start_not_included + step; + while x <= end_included { + // Call evaluate(x) and accumulate the result. + total = total.checked_add(self.evaluate(x)?).ok_or_else(|| { + ProtocolError::Overflow("Total evaluation overflow in evaluate_interval".into()) + })?; + x += step; + } + Ok(total) + } + + /// Evaluates the total token emission over a specified interval, clamped within additional bounds. + /// + /// This function calculates the sum of token emissions by invoking `self.evaluate(x)` at discrete + /// points that lie between `start_not_included` (exclusive) and `end_included` (inclusive), stepping by + /// `step`. 
In addition, only evaluation points that also fall within the optional bounds `start_bounds_included` + /// (inclusive) and `end_bounds_included` (inclusive) are considered. + /// + /// # Parameters + /// + /// - `start_not_included` (`RewardDistributionMoment`): + /// The moment after which emissions are considered (exclusive). + /// - `step` (`RewardDistributionMoment`): + /// The interval step between evaluations. **Must be greater than zero**. + /// - `end_included` (`RewardDistributionMoment`): + /// The final moment at which emissions are considered (inclusive). + /// - `start_bounds_included` (`Option`): + /// An optional lower bound for evaluation. Only evaluation points ≥ this value will be included. + /// - `end_bounds_included` (`Option`): + /// An optional upper bound for evaluation. Only evaluation points ≤ this value will be included. + /// + /// # Type Consistency + /// + /// This function **requires all input values to be of the same variant** (`BlockBasedMoment`, `TimeBasedMoment`, or `EpochBasedMoment`). + /// If a mismatch occurs, the function returns an error. + /// + /// # Returns + /// - `Ok(TokenAmount)`: The total sum of token emissions over all valid evaluation points. + /// - `Err(ProtocolError)`: If any evaluation fails (e.g., type mismatch, overflow, division-by-zero, or if + /// `step` is zero). + /// + /// # Behavior + /// The function computes the effective start point as the larger of: + /// - `start_not_included + step` (i.e., the first natural evaluation point). + /// - `start_bounds_included` (if provided). + /// + /// Similarly, the effective end point is computed as the smaller of: + /// - `end_included`. + /// - `end_bounds_included` (if provided). + /// + /// It then iterates over these evaluation points, accumulating the token amounts. 
+ pub fn evaluate_interval_in_bounds( + &self, + start_not_included: RewardDistributionMoment, + step: RewardDistributionMoment, + end_included: RewardDistributionMoment, + start_bounds_included: Option, + end_bounds_included: Option, + ) -> Result { + // Ensure that all moments are of the same type. + if !(start_not_included.same_type(&step) + && start_not_included.same_type(&end_included) + && start_bounds_included + .as_ref() + .map_or(true, |b| start_not_included.same_type(b)) + && end_bounds_included + .as_ref() + .map_or(true, |b| start_not_included.same_type(b))) + { + return Err(ProtocolError::AddingDifferentTypes( + "Mismatched RewardDistributionMoment types".to_string(), + )); + } + + if step == 0u64 { + return Err(ProtocolError::InvalidDistributionStep( + "evaluate_interval_in_bounds: step cannot be zero".into(), + )); + } + if end_included <= start_not_included { + return Ok(0); + } + + // The first natural evaluation point is start_not_included + step. + let first_point = (start_not_included + step)?; + // Determine the effective starting point: the larger of first_point and start_bounds_included (if provided). + let effective_start = if let Some(lb) = start_bounds_included { + if lb > first_point { + lb + } else { + first_point + } + } else { + first_point + }; + + // Determine the effective ending point: the smallest of end_included and end_bounds_included (if provided). + let effective_end = if let Some(ub) = end_bounds_included { + if ub < end_included { + ub + } else { + end_included + } + } else { + end_included + }; + + if effective_end < effective_start { + return Ok(0); + } + + let mut total: u64 = 0; + let mut x = effective_start; + while x <= effective_end { + total = total + .checked_add(self.evaluate(x.to_u64())?) 
+ .ok_or_else(|| { + ProtocolError::Overflow( + "Total evaluation overflow in evaluate_interval_in_bounds".into(), + ) + })?; + x = (x + step)?; + } + Ok(total) + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/mod.rs index 134f906cf00..26c0b3d5098 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/mod.rs @@ -1,240 +1,690 @@ -use crate::balances::credits::{SignedTokenAmount, TokenAmount}; -use ordered_float::NotNan; +use crate::balances::credits::TokenAmount; use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; use std::fmt; mod encode; +mod evaluate; +mod evaluate_interval; +mod validation; + +pub const MAX_DISTRIBUTION_PARAM: u64 = 281_474_976_710_655; //u48::Max 2^48 - 1 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, PartialOrd)] pub enum DistributionFunction { - /// A fixed amount of tokens is emitted for each period in the reward distribution type. + /// Emits a constant (fixed) number of tokens for every period. /// /// # Formula - /// - `f(x) = n` + /// For any period `x`, the emitted tokens are: + /// + /// ```text + /// f(x) = n + /// ``` /// /// # Use Case - /// - Simplicity - /// - Stable reward emissions + /// - When a predictable, unchanging reward is desired. + /// - Simplicity and stable emissions. /// /// # Example - /// - If we emit 5 tokens per block, and 3 blocks have passed, the `Release` call will release 15 tokens. - FixedAmount { n: TokenAmount }, + /// - If `n = 5` tokens per block, then after 3 blocks the total emission is 15 tokens. + FixedAmount { amount: TokenAmount }, - /// The amount of tokens decreases in predefined steps at fixed intervals. 
+ /// Emits a random number of tokens within a specified range. + /// + /// # Description + /// - This function selects a **random** token emission amount between `min` and `max`. + /// - The value is drawn **uniformly** between the bounds. + /// - The randomness uses a Pseudo Random Function (PRF) from x. /// /// # Formula - /// - `f(x) = n * (1 - decrease_per_interval)^(x / step_count)` + /// For any period `x`, the emitted tokens follow: /// - /// # Use Case - /// - Mimics Bitcoin and Dash Core emission models - /// - Encourages early participation by providing higher rewards initially + /// ```text + /// f(x) ∈ [min, max] + /// ``` + /// + /// # Parameters + /// - `min`: The **minimum** possible number of tokens emitted. + /// - `max`: The **maximum** possible number of tokens emitted. + /// + /// # Use Cases + /// - **Stochastic Rewards**: Introduces randomness into rewards to incentivize unpredictability. + /// - **Lottery-Based Systems**: Used for randomized emissions, such as block rewards with probabilistic payouts. /// /// # Example - /// - Bitcoin: A 50% decrease every 210,000 blocks (~4 years) - /// - Dash: A ~7% decrease every 210,000 blocks (~1 year) - StepDecreasingAmount { - step_count: u64, - decrease_per_interval: NotNan, - n: TokenAmount, - }, + /// Suppose a system emits **between 10 and 100 tokens per period**. + /// + /// ```text + /// Random { min: 10, max: 100 } + /// ``` + /// + /// | Period (x) | Emitted Tokens (Random) | + /// |------------|------------------------| + /// | 1 | 27 | + /// | 2 | 94 | + /// | 3 | 63 | + /// | 4 | 12 | + /// + /// - Each period, the function emits a **random number of tokens** between `min = 10` and `max = 100`. + /// - Over time, the **average reward trends toward the midpoint** `(min + max) / 2`. + /// + /// # Constraints + /// - **`min` must be ≤ `max`**, otherwise the function is invalid. + /// - If `min == max`, this behaves like a `FixedAmount` function with a constant emission. 
+ Random { min: TokenAmount, max: TokenAmount }, - /// A linear function emits tokens in increasing or decreasing amounts over time (integer precision). + /// Emits tokens that decrease in discrete steps at fixed intervals. /// /// # Formula - /// - `f(x) = a * x + b` - /// - Where `a` is the slope (rate of change) and `b` is the initial value. + /// For a given period `x`, the emission is calculated as: /// - /// # Description - /// - `a > 0`: Tokens increase over time. - /// - `a < 0`: Tokens decrease over time. - /// - `b` is the starting emission value. + /// ```text + /// f(x) = n * (1 - (decrease_per_interval_numerator / decrease_per_interval_denominator))^((x - s) / step_count) + /// ``` + /// + /// # Parameters + /// - `step_count`: The number of periods between each step. + /// - `decrease_per_interval_numerator` and `decrease_per_interval_denominator`: Define the reduction factor per step. + /// - `s`: Optional start period offset (e.g., start block or time). If not provided, the contract creation start is used. + /// - `n`: The initial token emission. + /// - `min_value`: Optional minimum emission value. /// /// # Use Case - /// - Incentivize early adopters with higher rewards (`a < 0`). - /// - Gradually increase emissions to match ecosystem growth (`a > 0`). + /// - Modeling reward systems similar to Bitcoin or Dash Core. + /// - Encouraging early participation by providing higher rewards initially. /// /// # Example - /// - Start with 50 tokens and increase by 10 tokens per epoch: `f(x) = 10x + 50`. - LinearInteger { a: i64, b: SignedTokenAmount }, + /// - Bitcoin-style: 50% reduction every 210,000 blocks. + /// - Dash-style: Approximately a 7% reduction every 210,000 blocks. 
+ StepDecreasingAmount { + step_count: u32, + decrease_per_interval_numerator: u16, + decrease_per_interval_denominator: u16, + s: Option, + n: TokenAmount, + min_value: Option, + }, - /// A linear function emits tokens in increasing or decreasing amounts over time (floating-point precision). - /// - /// # Formula - /// - `f(x) = a * x + b` - /// - Where `a` is the slope (rate of change) and `b` is the initial value. + /// Emits tokens in fixed amounts for predefined intervals (steps). /// - /// # Description - /// - `a > 0`: Tokens increase over time. - /// - `a < 0`: Tokens decrease over time. - /// - `b` is the starting emission value. + /// # Details + /// - Within each step, the emission remains constant. + /// - The keys in the `BTreeMap` represent the starting period for each interval, + /// and the corresponding values are the fixed token amounts to emit during that interval. /// /// # Use Case - /// - Similar to `LinearInteger`, but supports fractional rates of change. + /// - Adjusting rewards at specific milestones or time intervals. /// /// # Example - /// - Start with 50 tokens and increase by 0.5 tokens per epoch: `f(x) = 0.5x + 50`. - LinearFloat { - a: NotNan, - b: SignedTokenAmount, - }, + /// - Emit 100 tokens per block for the first 1,000 blocks, then 50 tokens per block thereafter. + Stepwise(BTreeMap), - /// A polynomial function emits tokens according to a quadratic or cubic curve (integer precision). + /// Emits tokens following a linear function that can increase or decrease over time + /// with fractional precision. /// /// # Formula - /// - `f(x) = a * x^n + b` - /// - Where `n` is the degree of the polynomial, `a` is the scaling factor, and `b` is the base amount. + /// The emission at period `x` is given by: /// - /// # Description - /// - Higher-degree polynomials allow for flexible emission curves. - /// - Use for growth or decay patterns that aren't linear. 
+ /// ```text + /// f(x) = (a * (x - s) / d) + b + /// ``` /// - /// # Use Case - /// - Reward systems with diminishing returns as time progresses. + /// # Parameters + /// - `a`: The slope numerator; determines the rate of change. + /// - `d`: The slope divisor; together with `a` controls the fractional rate. + /// - `s`: Optional start period offset. If not set, the contract creation start is assumed. + /// - `b`: The initial token emission (offset). + /// - `min_value` / `max_value`: Optional bounds to clamp the emission. /// - /// # Example - /// - Emit rewards based on a quadratic curve: `f(x) = 2x^2 + 20`. - PolynomialInteger { + /// # Details + /// - If `a > 0`, emissions increase over time. + /// - If `a < 0`, emissions decrease over time. + /// + /// # Behavior + /// - **If `a > 0`**, emissions increase linearly over time. + /// - **If `a < 0`**, emissions decrease linearly over time. + /// - **If `a = 0`**, emissions remain constant at `b`. + /// + /// # Use Cases + /// - **Predictable Inflation or Deflation:** A simple mechanism to adjust token supply dynamically. + /// - **Long-Term Incentive Structures:** Ensures steady and measurable growth or reduction of rewards. + /// - **Decaying Emissions:** Can be used to gradually taper off token rewards over time. + /// - **Sustained Growth Models:** Encourages prolonged engagement by steadily increasing rewards. + /// + /// # Examples + /// + /// ## **1️⃣ Increasing Linear Emission (`a > 0`)** + /// - Tokens increase by **1 token per block** starting from 10. + /// + /// ```text + /// f(x) = (1 * (x - 0) / 1) + 10 + /// ``` + /// + /// | Block (x) | f(x) (Tokens) | + /// |-----------|---------------| + /// | 0 | 10 | + /// | 1 | 11 | + /// | 2 | 12 | + /// | 3 | 13 | + /// + /// **Use Case:** Encourages continued participation by providing increasing rewards over time. + /// + /// --- + /// + /// ## **2️⃣ Decreasing Linear Emission (`a < 0`)** + /// - Tokens **start at 100 and decrease by 2 per period**. 
+ /// + /// ```text + /// f(x) = (-2 * (x - 0) / 1) + 100 + /// ``` + /// + /// | Block (x) | f(x) (Tokens) | + /// |-----------|---------------| + /// | 0 | 100 | + /// | 1 | 98 | + /// | 2 | 96 | + /// | 3 | 94 | + /// + /// **Use Case:** Suitable for deflationary models where rewards need to decrease over time. + /// + /// --- + /// + /// ## **3️⃣ Emission with a Delayed Start (`s > 0`)** + /// - **No emissions before `x = s`** (e.g., rewards start at block `10`). + /// + /// ```text + /// f(x) = (5 * (x - 10) / 1) + 50 + /// ``` + /// + /// | Block (x) | f(x) (Tokens) | + /// |-----------|---------------| + /// | 9 | 50 (no change)| + /// | 10 | 50 | + /// | 11 | 55 | + /// | 12 | 60 | + /// + /// **Use Case:** Useful when rewards should only begin at a specific milestone. + /// + /// --- + /// + /// ## **4️⃣ Clamping Emissions with `min_value` and `max_value`** + /// - **Start at 50, increase by 2, but never exceed 60.** + /// + /// ```text + /// f(x) = (2 * (x - 0) / 1) + 50 + /// ``` + /// + /// | Block (x) | f(x) (Tokens) | + /// |-----------|---------------| + /// | 0 | 50 | + /// | 1 | 52 | + /// | 2 | 54 | + /// | 5 | 60 (max cap) | + /// + /// **Use Case:** Prevents runaway inflation by limiting the emission range. + /// + /// --- + /// + /// # Summary + /// - **Increasing rewards (`a > 0`)**: Encourages longer participation. + /// - **Decreasing rewards (`a < 0`)**: Supports controlled deflation. + /// - **Delayed start (`s > 0`)**: Ensures rewards only begin at a specific point. + /// - **Clamping (`min_value`, `max_value`)**: Maintains controlled emission boundaries. + Linear { a: i64, - n: i64, - b: SignedTokenAmount, + d: u64, + s: Option, + b: TokenAmount, + min_value: Option, + max_value: Option, }, - /// A polynomial function emits tokens according to a quadratic or cubic curve (floating-point precision). + /// Emits tokens following a polynomial curve with integer arithmetic. 
/// /// # Formula - /// - `f(x) = a * x^n + b` - /// - Where `n` is the degree of the polynomial, `a` is the scaling factor, and `b` is the base amount. - /// - /// # Description - /// - Similar to `PolynomialInteger`, but supports fractional scaling and degrees. - /// - /// # Example - /// - Emit rewards based on a cubic curve with fractional growth: `f(x) = 0.5x^3 + 20`. - PolynomialFloat { - a: NotNan, - n: NotNan, - b: SignedTokenAmount, + /// The emission at period `x` is given by: + /// + /// ```text + /// f(x) = (a * (x - s + o)^(m/n)) / d + b + /// ``` + /// + /// # Parameters + /// - `a`: Scaling factor for the polynomial term. + /// - `m` and `n`: Together specify the exponent as a rational number (allowing non-integer exponents). + /// - `d`: A divisor for scaling. + /// - `s`: Optional start period offset. If not provided, the contract creation start is used. + /// - `o`: An offset for the polynomial function, this is useful if s is in None, + /// - `b`: An offset added to the computed value. + /// - `min_value` / `max_value`: Optional bounds to constrain the emission. + /// + /// # Behavior & Use Cases + /// The polynomial function's behavior depends on the values of `a` (scaling factor) and `m` (exponent numerator). + /// + /// ## **1️⃣ `a > 0`, `m > 0` (Increasing Polynomial Growth)** + /// - **Behavior**: Emissions **increase at an accelerating rate** over time. + /// - **Use Case**: Suitable for models where incentives start small and grow over time (e.g., boosting late-stage participation). + /// - **Example**: + /// ```text + /// f(x) = (2 * (x - s + o)^2) / d + 10 + /// ``` + /// - If `s = 0`, `o = 0`, and `d = 1`, then: + /// - `f(1) = 12` + /// - `f(2) = 18` + /// - `f(3) = 28` (Emissions **accelerate over time**) + /// + /// ## **2️⃣ `a > 0`, `m < 0` (Decreasing Polynomial Decay)** + /// - **Behavior**: Emissions **start high and gradually decline**. 
+ /// - **Use Case**: Useful for front-loaded incentives where rewards are larger at the beginning and taper off over time. + /// - **Example**: + /// ```text + /// f(x) = (5 * (x - s + o)^(-1)) / d + 10 + /// ``` + /// - If `s = 0`, `o = 0`, and `d = 1`, then: + /// - `f(1) = 15` + /// - `f(2) = 12.5` + /// - `f(3) = 11.67` (Emissions **shrink but never hit zero**) + /// + /// ## **3️⃣ `a < 0`, `m > 0` (Inverted Growth → Decreasing Over Time)** + /// - **Behavior**: Emissions **start large but decrease faster over time**. + /// - **Use Case**: Suitable for cases where high initial incentives quickly drop off (e.g., limited early rewards). + /// - **Example**: + /// ```text + /// f(x) = (-3 * (x - s + o)^2) / d + 50 + /// ``` + /// - If `s = 0`, `o = 0`, and `d = 1`, then: + /// - `f(1) = 47` + /// - `f(2) = 38` + /// - `f(3) = 23` (Emissions **fall sharply**) + /// + /// ## **4️⃣ `a < 0`, `m < 0` (Inverted Decay → Slowing Increase)** + /// - **Behavior**: Emissions **start low, rise gradually, and then flatten out**. + /// - **Use Case**: Useful for controlled inflation where rewards increase over time but approach a stable maximum. + /// - **Example**: + /// ```text + /// f(x) = (-10 * (x - s + o)^(-2)) / d + 50 + /// ``` + /// - If `s = 0`, `o = 0`, and `d = 1`, then: + /// - `f(1) = 40` + /// - `f(2) = 47.5` + /// - `f(3) = 48.89` (Growth **slows as it approaches 50**) + /// + /// # Summary + /// - **Positive `a` means increasing emissions**, while **negative `a` means decreasing emissions**. + /// - **Positive `m` leads to growth**, while **negative `m` leads to decay**. + /// - The combination of `a` and `m` defines whether emissions accelerate, decay, or remain stable. + Polynomial { + a: i64, + d: u64, + m: i64, + n: u64, + o: i64, + s: Option, + b: TokenAmount, + min_value: Option, + max_value: Option, }, - /// An exponential function emits tokens based on exponential growth or decay. + /// Emits tokens following an exponential function. 
/// /// # Formula - /// - `f(x) = a * e^(b * x) + c` - /// - Where `a` is the scaling factor, `b` controls the growth/decay rate, and `c` is an offset. - /// - /// # Description - /// - Exponential growth: `b > 0`, emissions increase rapidly. - /// - Exponential decay: `b < 0`, emissions decrease rapidly. - /// - Useful for early incentivization or ecosystem maturity. - /// - /// # Use Case - /// - Reward mechanisms where early contributors get larger rewards. - /// - /// # Example - /// - Start with 100 tokens and halve emissions every interval, with a minimum of 5 tokens: `f(x) = 100 * e^(-0.693 * x) + 5`. + /// The emission at period `x` is given by: + /// + /// ```text + /// f(x) = (a * e^(m * (x - s) / n)) / d + c + /// ``` + /// + /// # Parameters + /// - `a`: The scaling factor. + /// - `m` and `n`: Define the exponent rate (with `m > 0` for growth and `m < 0` for decay). + /// - `d`: A divisor used to scale the exponential term. + /// - `s`: Optional start period offset. If not set, the contract creation start is assumed. + /// - `o`: An offset for the exp function, this is useful if s is in None. + /// - `c`: An offset added to the result. + /// - `min_value` / `max_value`: Optional constraints on the emitted tokens. + /// + /// # Use Cases + /// ## **Exponential Growth (`m > 0`):** + /// - **Incentivized Spending**: Higher emissions over time increase the circulating supply, encouraging users to spend tokens. + /// - **Progressive Emission Models**: Useful for models where early emissions are low but increase significantly over time. + /// - **Early-Stage Adoption Strategies**: Helps drive later participation by offering increasing rewards as time progresses. + /// + /// ## **Exponential Decay (`m < 0`):** + /// - **Deflationary Reward Models**: Reduces emissions over time, ensuring token scarcity. + /// - **Early Participation Incentives**: Encourages early users by distributing more tokens initially and gradually decreasing rewards. 
+ /// - **Sustainable Emission Models**: Helps manage token supply while preventing runaway inflation. + /// + /// # Examples + /// ## **Example 1: Exponential Growth (`m > 0`)** + /// - **Use Case**: A staking model where rewards increase over time to encourage long-term participation. + /// - **Parameters**: `a = 100`, `m = 2`, `n = 50`, `d = 10`, `c = 5` + /// - **Formula**: + /// ```text + /// f(x) = (100 * e^(2 * (x - s) / 50)) / 10 + 5 + /// ``` + /// - **Effect**: Emissions start small but **increase exponentially** over time, rewarding late stakers more than early ones. + /// + /// ## **Example 2: Exponential Decay (`m < 0`)** + /// - **Use Case**: A deflationary model where emissions start high and gradually decrease to ensure scarcity. + /// - **Parameters**: `a = 500`, `m = -3`, `n = 100`, `d = 20`, `c = 10` + /// - **Formula**: + /// ```text + /// f(x) = (500 * e^(-3 * (x - s) / 100)) / 20 + 10 + /// ``` + /// - **Effect**: Emissions start **high and decay exponentially**, ensuring early participants get larger rewards. Exponential { - a: NotNan, - b: NotNan, - c: SignedTokenAmount, + a: u64, + d: u64, + m: i64, + n: u64, + o: i64, + s: Option, + c: TokenAmount, + min_value: Option, + max_value: Option, }, - /// A logarithmic function emits tokens based on logarithmic growth. + /// Emits tokens following a logarithmic function. /// /// # Formula - /// - `f(x) = a * log_b(x) + c` - /// - Where `a` is the scaling factor, `b` is the logarithm base, and `c` is an offset. + /// The emission at period `x` is computed as: /// - /// # Description - /// - Growth starts quickly but slows as `x` increases. - /// - Suitable for sustainable emissions over long periods. + /// ```text + /// f(x) = (a * log(m * (x - s + o) / n)) / d + b + /// ``` + /// + /// # Parameters + /// - `a`: Scaling factor for the logarithmic term. + /// - `d`: A divisor for scaling. + /// - `m` and `n`: Adjust the input to the logarithm function. + /// - `s`: Optional start period offset. 
If not provided, the contract creation start is used. + /// - `o`: An offset for the log function, this is useful if s is in None. + /// - `b`: An offset added to the result. + /// - `min_value` / `max_value`: Optional bounds to ensure the emission remains within limits. /// /// # Use Case - /// - Gradual emissions tapering to balance supply and demand. + /// - **Gradual Growth with a Slowing Rate**: Suitable for reward schedules where the emission + /// starts at a lower rate, increases quickly at first, but then slows down over time. + /// - **Predictable Emission Scaling**: Ensures a growing but controlled emission curve that + /// does not escalate too quickly. + /// - **Sustainability and Inflation Control**: Helps prevent runaway token supply growth + /// by ensuring rewards increase at a decreasing rate. /// /// # Example - /// - Emit rewards using a log base-2 curve: `f(x) = 20 * log_2(x) + 5`. + /// - Suppose we want token emissions to start at a low value and grow over time, but at a + /// **decreasing rate**, ensuring controlled long-term growth. + /// + /// - Given the formula: + /// ```text + /// f(x) = (a * log(m * (x - s + o) / n)) / d + b + /// ``` + /// + /// - Let’s assume the following parameters: + /// - `a = 100`: Scaling factor. + /// - `d = 10`: Divisor to control overall scaling. + /// - `m = 2`, `n = 1`: Adjust the logarithmic input. + /// - `s = 0`, `o = 1`: Starting conditions. + /// - `b = 50`: Base amount added. + /// + /// - This results in: + /// ```text + /// f(x) = (100 * log(2 * (x + 1) / 1)) / 10 + 50 + /// ``` + /// + /// - **Expected Behavior:** + /// - At `x = 1`, emission = `f(1) = (100 * log(4)) / 10 + 50 ≈ 82` + /// - At `x = 10`, emission = `f(10) = (100 * log(22)) / 10 + 50 ≈ 106` + /// - At `x = 100`, emission = `f(100) = (100 * log(202)) / 10 + 50 ≈ 130` + /// + /// - **Observations:** + /// - The emission **increases** over time, but at a **slowing rate**. 
+ /// - Early increases are more pronounced, but as `x` grows, the additional reward per + /// period gets smaller. + /// - This makes it ideal for long-term, controlled emission models. Logarithmic { - a: NotNan, - b: NotNan, - c: SignedTokenAmount, + a: i64, + d: u64, + m: u64, + n: u64, + o: i64, + s: Option, + b: TokenAmount, + min_value: Option, + max_value: Option, }, - - /// A stepwise function emits tokens in fixed amounts for predefined intervals. + /// Emits tokens following an inverted logarithmic function. /// - /// # Description - /// - Emissions remain constant within each step. - /// - Steps define specific time intervals or milestones. + /// # Formula + /// The emission at period `x` is given by: + /// + /// ```text + /// f(x) = (a * log( n / (m * (x - s + o)) )) / d + b + /// ``` + /// + /// # Parameters + /// - `a`: Scaling factor. + /// - `d`: Divisor for scaling. + /// - `m` and `n`: Together control the logarithm argument inversion. + /// - `o`: Offset applied inside the logarithm. + /// - `s`: Optional start period offset. + /// - `b`: Offset added to the computed value. + /// - `min_value` / `max_value`: Optional boundaries for the emission. /// /// # Use Case - /// - Adjust rewards at specific milestones. + /// - **Gradual Decay of Rewards**: Suitable when early adopters should receive higher rewards, + /// but later participants should receive smaller but still meaningful amounts. + /// - **Resource Draining / Controlled Burn**: Used when token emissions should drop significantly + /// at first but slow down over time to preserve capital. + /// - **Airdrop or Grant System**: Ensures early claimants receive larger distributions, but later + /// claimants receive diminishing rewards. /// /// # Example - /// - Emit 100 tokens per block for the first 1000 blocks, then 50 tokens thereafter. 
- Stepwise(Vec<(u64, TokenAmount)>), + /// - Suppose a system starts with **500 tokens per period** and gradually reduces over time: + /// + /// ```text + /// f(x) = (1000 * log(5000 / (5 * (x - 1000)))) / 10 + 10 + /// ``` + /// + /// Example values: + /// + /// | Period (x) | Emission (f(x)) | + /// |------------|----------------| + /// | 1000 | 500 tokens | + /// | 1500 | 230 tokens | + /// | 2000 | 150 tokens | + /// | 5000 | 50 tokens | + /// | 10,000 | 20 tokens | + /// | 50,000 | 10 tokens | + /// + /// - The emission **starts high** and **gradually decreases**, ensuring early adopters receive + /// more tokens while later participants still get rewards. + /// - The function **slows down the rate of decrease** over time, preventing emissions from + /// hitting zero too quickly. + InvertedLogarithmic { + a: i64, + d: u64, + m: u64, + n: u64, + o: i64, + s: Option, + b: TokenAmount, + min_value: Option, + max_value: Option, + }, } impl fmt::Display for DistributionFunction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - DistributionFunction::FixedAmount { n } => { + DistributionFunction::FixedAmount { amount: n } => { write!(f, "FixedAmount: {} tokens per period", n) } + DistributionFunction::Random { min, max } => { + write!(f, "Random: tokens ∈ [{}, {}] per period", min, max) + } DistributionFunction::StepDecreasingAmount { step_count, - decrease_per_interval, + decrease_per_interval_numerator, + decrease_per_interval_denominator, + s, n, + min_value, } => { write!( f, - "StepDecreasingAmount: {} tokens, decreasing by {:.3}% every {} steps", + "StepDecreasingAmount: {} tokens, decreasing by {}/{} every {} steps", n, - decrease_per_interval.into_inner() * 100.0, + decrease_per_interval_numerator, + decrease_per_interval_denominator, step_count - ) + )?; + if let Some(start) = s { + write!(f, " starting at period {}", start)?; + } + if let Some(min) = min_value { + write!(f, ", with a minimum emission of {}", min)?; + } + Ok(()) } - 
DistributionFunction::LinearInteger { a, b } => { - write!(f, "LinearInteger: f(x) = {} * x + {}", a, b) + DistributionFunction::Stepwise(steps) => { + write!(f, "Stepwise emission: ")?; + let mut first = true; + for (step, amount) in steps { + if !first { + write!(f, ", ")?; + } + first = false; + write!(f, "[Step {} → {} tokens]", step, amount)?; + } + Ok(()) } - DistributionFunction::LinearFloat { a, b } => { - write!(f, "LinearFloat: f(x) = {:.3} * x + {}", a.into_inner(), b) + DistributionFunction::Linear { + a, + d, + s, + b, + min_value, + max_value, + } => { + write!(f, "Linear: f(x) = {} * (x", a)?; + if let Some(start) = s { + write!(f, " - {})", start)?; + } else { + write!(f, ")")?; + } + write!(f, " / {}) + {}", d, b)?; + if let Some(min) = min_value { + write!(f, ", min: {}", min)?; + } + if let Some(max) = max_value { + write!(f, ", max: {}", max)?; + } + Ok(()) } - DistributionFunction::PolynomialInteger { a, n, b } => { - write!(f, "PolynomialInteger: f(x) = {} * x^{} + {}", a, n, b) + DistributionFunction::Polynomial { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + write!(f, "Polynomial: f(x) = {} * (x", a)?; + if let Some(start) = s { + write!(f, " - {} + {})", start, o)?; + } else { + write!(f, " + {})", o)?; + } + write!(f, "^( {} / {} ) / {} + {}", m, n, d, b)?; + if let Some(min) = min_value { + write!(f, ", min: {}", min)?; + } + if let Some(max) = max_value { + write!(f, ", max: {}", max)?; + } + Ok(()) } - DistributionFunction::PolynomialFloat { a, n, b } => { - write!( - f, - "PolynomialFloat: f(x) = {:.3} * x^{:.3} + {}", - a.into_inner(), - n.into_inner(), - b - ) + DistributionFunction::Exponential { + a, + d, + m, + n, + o, + s, + c, + min_value, + max_value, + } => { + write!(f, "Exponential: f(x) = {} * e^( {} * (x", a, m)?; + if let Some(start) = s { + write!(f, " - {} + {})", start, o)?; + } else { + write!(f, " + {})", o)?; + } + write!(f, " / {} ) / {} + {}", n, d, c)?; + if let Some(min) = min_value { + 
write!(f, ", min: {}", min)?; + } + if let Some(max) = max_value { + write!(f, ", max: {}", max)?; + } + Ok(()) } - DistributionFunction::Exponential { a, b, c } => { - write!( - f, - "Exponential: f(x) = {:.3} * e^({:.3} * x) + {}", - a.into_inner(), - b.into_inner(), - c - ) + DistributionFunction::Logarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + write!(f, "Logarithmic: f(x) = {} * log( {} * (x", a, m)?; + if let Some(start) = s { + write!(f, " - {} + {})", start, o)?; + } else { + write!(f, " + {})", o)?; + } + write!(f, " / {} ) / {} + {}", n, d, b)?; + if let Some(min) = min_value { + write!(f, ", min: {}", min)?; + } + if let Some(max) = max_value { + write!(f, ", max: {}", max)?; + } + Ok(()) } - DistributionFunction::Logarithmic { a, b, c } => { + DistributionFunction::InvertedLogarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { write!( f, - "Logarithmic: f(x) = {:.3} * log_{:.3}(x) + {}", - a.into_inner(), - b.into_inner(), - c - ) - } - DistributionFunction::Stepwise(steps) => { - write!(f, "Stepwise: ")?; - for (index, (step, amount)) in steps.iter().enumerate() { - if index > 0 { - write!(f, ", ")?; - } - write!(f, "Step {} -> {}", step, amount)?; + "InvertedLogarithmic: f(x) = {} * log( {} / ({} * (x", + a, n, m + )?; + if let Some(start) = s { + write!(f, " - {} + {})", start, o)?; + } else { + write!(f, " + {})", o)?; + } + write!(f, ") ) / {} + {}", d, b)?; + if let Some(min) = min_value { + write!(f, ", min: {}", min)?; + } + if let Some(max) = max_value { + write!(f, ", max: {}", max)?; } Ok(()) } diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/validation.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/validation.rs new file mode 100644 index 00000000000..bc720767b46 --- /dev/null +++ 
b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_function/validation.rs @@ -0,0 +1,2344 @@ +use crate::consensus::basic::data_contract::{ + InvalidTokenDistributionFunctionDivideByZeroError, + InvalidTokenDistributionFunctionIncoherenceError, + InvalidTokenDistributionFunctionInvalidParameterError, + InvalidTokenDistributionFunctionInvalidParameterTupleError, +}; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::{ + DistributionFunction, MAX_DISTRIBUTION_PARAM, +}; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +impl DistributionFunction { + pub fn validate( + &self, + start_moment: u64, + ) -> Result { + match self { + DistributionFunction::FixedAmount { amount: n } => { + // Validate that n is > 0 and does not exceed u32::MAX. + if *n == 0 || *n > u32::MAX as u64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "n".to_string(), + 1, + u32::MAX as i64, + None, + ) + .into(), + )); + } + } + DistributionFunction::Random { min, max } => { + // Ensure that `min` is not greater than `max` + if *min > *max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "min".to_string(), + "max".to_string(), + "smaller than or equal to".to_string(), + ) + .into(), + )); + } + + // Ensure that `max` is within valid bounds + if *max > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "max".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + DistributionFunction::StepDecreasingAmount { + step_count, + decrease_per_interval_numerator, + decrease_per_interval_denominator, + s, + n, + min_value, + } => { + // Validate n. 
+ if *n == 0 || *n > u32::MAX as u64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "n".to_string(), + 1, + u32::MAX as i64, + None, + ) + .into(), + )); + } + if *step_count == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *decrease_per_interval_denominator == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *decrease_per_interval_numerator >= *decrease_per_interval_denominator { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "decrease_per_interval_numerator".to_string(), + "decrease_per_interval_denominator".to_string(), + "smaller than".to_string(), + ) + .into(), + )); + } + if let Some(min) = min_value { + if *n < *min { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "n".to_string(), + "min_value".to_string(), + "greater than or equal to".to_string(), + ) + .into(), + )); + } + } + + if let Some(s) = s { + if *s > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "s".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + } + + DistributionFunction::Stepwise(steps) => { + // Ensure at least two distinct steps. 
+ if steps.is_empty() || steps.len() == 1 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "steps".to_string(), + 2, + u16::MAX as i64, + None, + ) + .into(), + )); + } + } + // f(x) = (a * (x - s) / d) + b + DistributionFunction::Linear { + a, + d, + s, + b, + min_value, + max_value, + } => { + if *d == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *a == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "a".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + Some(0), + ) + .into(), + )); + } + + if *a > MAX_DISTRIBUTION_PARAM as i64 || *a < -(MAX_DISTRIBUTION_PARAM as i64) { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "a".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if let (Some(min), Some(max)) = (min_value, max_value) { + if min > max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "min_value".to_string(), + "max_value".to_string(), + "smaller than or equal to".to_string(), + ) + .into(), + )); + } + } + + if let Some(s) = s { + if *s > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "s".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if let Some(max) = max_value { + if *max > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "max".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, 
+ ) + .into(), + )); + } + } + + let start_token_amount = DistributionFunction::Linear { + a: *a, + d: *d, + s: Some(s.unwrap_or(start_moment)), + b: *b, + min_value: *min_value, + max_value: *max_value, + } + .evaluate(start_moment)?; + + if *a > 0 { + // we want to put in the max value to see if we are starting off at the max + // value. + // if we are starting at the max value there's no point at doing a linear function + if let Some(max) = max_value { + if start_token_amount == *max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "since a is positive the linear function will increase, however it starts at the maximum value already which makes the function never used".to_string(), + ) + .into(), + )); + } + } + start_token_amount + } else { + if let Some(min) = min_value { + if start_token_amount == *min { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "since a is negative the linear function will decrease, however it starts at the minimum value which makes the function never used".to_string(), + ) + .into(), + )); + } + } + start_token_amount + }; + } + + // f(x) = (a * (x - s + o)^(m/n)) / d + b + DistributionFunction::Polynomial { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + if *d == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *n == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + + if let Some(s) = s { + if *s > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "s".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if *o > 
MAX_DISTRIBUTION_PARAM as i64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if *o < -(MAX_DISTRIBUTION_PARAM as i64) { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if let Some(max) = max_value { + if *max > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "max".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if let (Some(min), Some(max)) = (min_value, max_value) { + if min > max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "min_value".to_string(), + "max_value".to_string(), + "smaller than or equal to".to_string(), + ) + .into(), + )); + } + } + + let start_token_amount = DistributionFunction::Polynomial { + a: *a, + d: *d, + m: *m, + n: *n, + o: *o, + s: Some(s.unwrap_or(start_moment)), + b: *b, + min_value: *min_value, + max_value: *max_value, + } + .evaluate(start_moment)?; + + // Now, based on the monotonicity implied by (*a) * (*m), + // check for incoherence: + if (*a) * (*m) > 0 { + // The function is increasing. + if let Some(max) = max_value { + if start_token_amount == *max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "Since a and m imply an increasing function, but the start amount is already at the maximum, the function would never produce a higher value." 
+ .to_string(), + ).into(), + )); + } + } + } else if (*a) * (*m) < 0 { + // The function is decreasing. + if let Some(min) = min_value { + if start_token_amount == *min { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "Since a and m imply a decreasing function, but the start amount is already at the minimum, the function would never produce a lower value." + .to_string(), + ).into(), + )); + } + } + } + } + // f(x) = (a * e^(m * (x - s + o) / n)) / d + c + DistributionFunction::Exponential { + a, + d, + m, + n, + o, + s, + c, + min_value, + max_value, + } => { + if *d == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *n == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *m == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "m".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + Some(0), + ) + .into(), + )); + } + if *a == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "a".to_string(), + 1, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if *m > 0 { + // m is positive means that we need a max value set + if max_value.is_none() { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "max_value".to_string(), + "m".to_string(), + "set if the following parameter is positive".to_string(), + ) + .into(), + )); + } + } + + if let Some(s) = s { + if *s > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + 
InvalidTokenDistributionFunctionInvalidParameterError::new( + "s".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if *o > MAX_DISTRIBUTION_PARAM as i64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if *a > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "a".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if *o < -(MAX_DISTRIBUTION_PARAM as i64) { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if let Some(max) = max_value { + if *max > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "max".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if let (Some(min), Some(max)) = (min_value, max_value) { + if min > max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "min_value".to_string(), + "max_value".to_string(), + "smaller than or equal to".to_string(), + ) + .into(), + )); + } + } + + let start_token_amount = DistributionFunction::Exponential { + a: *a, + d: *d, + m: *m, + n: *n, + o: *o, + s: Some(s.unwrap_or(start_moment)), + c: *c, + min_value: *min_value, + max_value: *max_value, + } + .evaluate(start_moment)?; + + if *m > 0 { + // we want to put in the max value to see if we are starting off at the max + // value. 
+ // if we are starting at the max value there's no point at doing an exp + if let Some(max) = max_value { + if start_token_amount == *max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "since m is positive the exponential function will increase, however it starts at the maximum value already which makes the function never used".to_string(), + ) + .into(), + )); + } + } + start_token_amount + } else { + if let Some(min) = min_value { + if start_token_amount == *min { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "since m is negative the exponential function will decrease, however it starts at the minimum value which makes the function never used".to_string(), + ) + .into(), + )); + } + } + start_token_amount + }; + } + // f(x) = (a * log(m * (x - s + o) / n)) / d + b + DistributionFunction::Logarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + if *d == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *n == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *m == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "m".to_string(), + 1, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + if *a == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "a".to_string(), + 1, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if let Some(s) = s { + if *s > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + 
InvalidTokenDistributionFunctionInvalidParameterError::new( + "s".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if let Some(max) = max_value { + if *max > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "max".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + + if *o > MAX_DISTRIBUTION_PARAM as i64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if *o < -(MAX_DISTRIBUTION_PARAM as i64) { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + + if let (Some(min), Some(max)) = (min_value, max_value) { + if min > max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "min_value".to_string(), + "max_value".to_string(), + "smaller than or equal to".to_string(), + ) + .into(), + )); + } + } + + let eval_s = s.unwrap_or(start_moment); + + if start_moment as i64 - eval_s as i64 + o <= 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "s".to_string(), + "o".to_string(), + "(x - s + o) must be bigger than 0 in f(x) = (a * log(m * (x - s + o) / n)) / d + b".to_string(), + ) + .into(), + )); + } + + let start_token_amount = DistributionFunction::Logarithmic { + a: *a, + d: *d, + m: *m, + n: *n, + o: *o, + s: Some(s.unwrap_or(start_moment)), + b: *b, + min_value: *min_value, + max_value: *max_value, + } + .evaluate(start_moment)?; + + if let 
Some(max) = max_value { + if start_token_amount == *max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "The log function will always increase, however it starts at the maximum value already which makes the function never used".to_string(), + ) + .into(), + )); + } + } + } + // f(x) = (a * log( n / (m * (x - s + o)) )) / d + b + DistributionFunction::InvertedLogarithmic { + a, + d, + m, + n, + o, + s, + b, + min_value, + max_value, + } => { + // Check for division by zero. + if *d == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + if *n == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "n".to_string(), + 1, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + if *m == 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionDivideByZeroError::new(self.clone()).into(), + )); + } + + // Validate s: if provided, it must not exceed MAX_DISTRIBUTION_PARAM. + if let Some(s_val) = s { + if *s_val > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "s".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + // Validate o is within allowed bounds. 
+ if *o > MAX_DISTRIBUTION_PARAM as i64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + if *o < -(MAX_DISTRIBUTION_PARAM as i64) { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "o".to_string(), + -(MAX_DISTRIBUTION_PARAM as i64), + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + // Validate max_value if provided. + if let Some(max) = max_value { + if *max > MAX_DISTRIBUTION_PARAM { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterError::new( + "max".to_string(), + 0, + MAX_DISTRIBUTION_PARAM as i64, + None, + ) + .into(), + )); + } + } + // If both min_value and max_value are provided, ensure min_value <= max_value. + if let (Some(min), Some(max)) = (min_value, max_value) { + if min > max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "min_value".to_string(), + "max_value".to_string(), + "smaller than or equal to".to_string(), + ) + .into(), + )); + } + } + + // Use the provided s or default to start_moment. + let start_s = s.unwrap_or(start_moment); + // Ensure the argument for the logarithm is > 0: + if (start_moment as i64 - start_s as i64 + *o) <= 0 { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionInvalidParameterTupleError::new( + "s".to_string(), + "o".to_string(), + "(x - s + o) must be > 0 in f(x) = (a * ln(n / (m * (x - s + o)))) / d + b".to_string(), + ) + .into(), + )); + } + + // Evaluate the function at the starting moment. 
+ let start_token_amount = DistributionFunction::InvertedLogarithmic { + a: *a, + d: *d, + m: *m, + n: *n, + o: *o, + s: Some(start_s), + b: *b, + min_value: *min_value, + max_value: *max_value, + } + .evaluate(start_moment)?; + + // Determine the function's monotonicity. + // For InvertedLogarithmic, f'(x) = -a / (d * (x - s + o)). + // Hence, if a > 0, the function is decreasing; + // if a < 0, the function is increasing. + if *a > 0 { + // For a decreasing function, if the start amount is already at min_value, + // the function would never decrease further. + if let Some(min) = min_value { + if start_token_amount == *min { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "Since a is positive, the inverted logarithmic function is decreasing, but it starts at the minimum value already, so it will never produce a lower value.".to_string(), + ) + .into(), + )); + } + } + } else if *a < 0 { + // For an increasing function, if the start amount is already at max_value, + // the function would never increase further. 
+ if let Some(max) = max_value { + if start_token_amount == *max { + return Ok(SimpleConsensusValidationResult::new_with_error( + InvalidTokenDistributionFunctionIncoherenceError::new( + "Since a is negative, the inverted logarithmic function is increasing, but it starts at the maximum value already, so it will never produce a higher value.".to_string(), + ) + .into(), + )); + } + } + } + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} +#[cfg(test)] +mod tests { + use super::*; + use std::collections::BTreeMap; + + const START_MOMENT: u64 = 4000; + mod fixed_amount { + use super::*; + #[test] + fn test_fixed_amount_valid() { + let dist = DistributionFunction::FixedAmount { amount: 100 }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_fixed_amount_valid") + .first_error() + .is_none()); + } + + #[test] + fn test_fixed_amount_zero_invalid() { + let dist = DistributionFunction::FixedAmount { amount: 0 }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_fixed_amount_zero_invalid") + .first_error() + .is_some()); + } + + #[test] + fn test_fixed_amount_max_valid() { + let dist = DistributionFunction::FixedAmount { + amount: u32::MAX as u64, + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_fixed_amount_max_valid") + .first_error() + .is_none()); + } + + #[test] + fn test_fixed_amount_exceeds_max_invalid() { + let dist = DistributionFunction::FixedAmount { + amount: u32::MAX as u64 + 1, + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_fixed_amount_exceeds_max_invalid") + .first_error() + .is_some()); + } + } + mod step_decreasing { + use super::*; + + #[test] + fn test_step_decreasing_amount_valid() { + let dist = DistributionFunction::StepDecreasingAmount { + step_count: 10, + decrease_per_interval_numerator: 1, + decrease_per_interval_denominator: 2, + s: Some(0), + n: 100, + min_value: 
Some(10), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_step_decreasing_amount_valid") + .first_error() + .is_none()); + } + + #[test] + fn test_step_decreasing_amount_invalid_zero_step_count() { + let dist = DistributionFunction::StepDecreasingAmount { + step_count: 0, + decrease_per_interval_numerator: 1, + decrease_per_interval_denominator: 2, + s: Some(0), + n: 100, + min_value: Some(10), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_step_decreasing_amount_invalid_zero_step_count") + .first_error() + .is_some()); + } + + #[test] + fn test_step_decreasing_amount_invalid_zero_denominator() { + let dist = DistributionFunction::StepDecreasingAmount { + step_count: 10, + decrease_per_interval_numerator: 1, + decrease_per_interval_denominator: 0, + s: Some(0), + n: 100, + min_value: Some(10), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_step_decreasing_amount_invalid_zero_denominator") + .first_error() + .is_some()); + } + } + mod stepwise { + use super::*; + #[test] + fn test_stepwise_valid() { + let mut steps = BTreeMap::new(); + steps.insert(0, 100); + steps.insert(10, 50); + steps.insert(20, 25); + let dist = DistributionFunction::Stepwise(steps); + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_stepwise_valid") + .first_error() + .is_none()); + } + + #[test] + fn test_stepwise_invalid_single_step() { + let mut steps = BTreeMap::new(); + steps.insert(0, 100); + let dist = DistributionFunction::Stepwise(steps); + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_stepwise_invalid_single_step") + .first_error() + .is_some()); + } + } + mod linear { + use super::*; + #[test] + fn test_linear_valid() { + let dist = DistributionFunction::Linear { + a: 1, + d: 10, + s: Some(3800), + b: 100, + min_value: Some(50), + max_value: Some(150), + }; + + let 
result = dist.validate(START_MOMENT); + + // If the test fails, print the exact error message. + if let Err(err) = &result { + panic!("Test failed: Expected no error, but got: {:?}", err); + } + + // If validation succeeds but contains errors, print those errors. + if let Some(error) = result.expect("no error on test_linear_valid").first_error() { + panic!("Test failed: Validation error found: {:?}", error); + } + } + #[test] + fn test_linear_invalid_divide_by_zero() { + let dist = DistributionFunction::Linear { + a: 1, + d: 0, + s: Some(0), + b: 100, + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_linear_invalid_divide_by_zero") + .first_error() + .is_some()); + } + + #[test] + fn test_linear_invalid_s_exceeds_max() { + let dist = DistributionFunction::Linear { + a: 1, + d: 10, + s: Some(MAX_DISTRIBUTION_PARAM + 1), + b: 100, + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_linear_invalid_s_exceeds_max") + .first_error() + .is_some()); + } + + #[test] + fn test_linear_invalid_a_zero() { + let dist = DistributionFunction::Linear { + a: 0, // Invalid: a cannot be zero + d: 10, + s: Some(0), + b: 100, + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_a_zero") + .first_error() + .is_some(), + "Expected error: a cannot be zero" + ); + } + + #[test] + fn test_linear_invalid_a_too_large() { + let dist = DistributionFunction::Linear { + a: MAX_DISTRIBUTION_PARAM as i64 + 1, // Invalid: a exceeds allowed range + d: 10, + s: Some(0), + b: 100, + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_a_too_large") + .first_error() + .is_some(), + "Expected error: a exceeds 
MAX_DISTRIBUTION_PARAM" + ); + } + + #[test] + fn test_linear_invalid_min_greater_than_max() { + let dist = DistributionFunction::Linear { + a: 1, + d: 10, + s: Some(0), + b: 100, + min_value: Some(200), // Invalid: min > max + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_min_greater_than_max") + .first_error() + .is_some(), + "Expected error: min_value > max_value" + ); + } + + #[test] + fn test_linear_invalid_s_greater_than_max() { + let dist = DistributionFunction::Linear { + a: 1, + d: 10, + s: Some(MAX_DISTRIBUTION_PARAM + 1), // Invalid: s exceeds allowed range + b: 100, + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_s_greater_than_max") + .first_error() + .is_some(), + "Expected error: s exceeds MAX_DISTRIBUTION_PARAM" + ); + } + + #[test] + fn test_linear_invalid_max_exceeds_max_distribution_param() { + let dist = DistributionFunction::Linear { + a: 1, + d: 10, + s: Some(0), + b: 100, + min_value: Some(50), + max_value: Some(MAX_DISTRIBUTION_PARAM + 1), // Invalid: max_value exceeds max allowed range + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_max_exceeds_max_distribution_param") + .first_error() + .is_some(), + "Expected error: max_value exceeds MAX_DISTRIBUTION_PARAM" + ); + } + + #[test] + fn test_linear_invalid_starting_at_max_value() { + let dist = DistributionFunction::Linear { + a: 1, + d: 10, + s: Some(0), + b: 150, // Starts at max value + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_starting_at_max_value") + .first_error() + .is_some(), + "Expected error: function starts at max_value and cannot increase" + ); + } + + #[test] + fn test_linear_invalid_starting_at_min_value() 
{ + let dist = DistributionFunction::Linear { + a: -1, // Negative slope (decreasing function) + d: 10, + s: Some(0), + b: 50, // Starts at min value + min_value: Some(50), + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_linear_invalid_starting_at_min_value") + .first_error() + .is_some(), + "Expected error: function starts at min_value and cannot decrease" + ); + } + + #[test] + fn test_linear_valid_with_negative_a() { + let dist = DistributionFunction::Linear { + a: -5, // Valid decreasing function + d: 10, + s: Some(START_MOMENT), + b: 200, + min_value: Some(50), + max_value: Some(250), + }; + let result = dist.validate(START_MOMENT); + + match result { + Ok(validation_result) => { + if let Some(error) = validation_result.first_error() { + panic!( + "Test failed: Expected no error, but got validation error: {:?}", + error + ); + } + } + Err(protocol_error) => { + panic!( + "Test failed: Expected validation success, but got ProtocolError: {:?}", + protocol_error + ); + } + } + } + + #[test] + fn test_linear_valid_with_min_boundary() { + let dist = DistributionFunction::Linear { + a: -3, + d: 5, + s: Some(START_MOMENT), + b: 100, + min_value: Some(10), // Valid min boundary + max_value: Some(150), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_linear_valid_with_min_boundary") + .first_error() + .is_none()); + } + + #[test] + fn test_linear_valid_with_max_boundary() { + let dist = DistributionFunction::Linear { + a: 3, + d: 5, + s: Some(START_MOMENT), + b: 50, + min_value: Some(10), + max_value: Some(MAX_DISTRIBUTION_PARAM), // Valid max boundary + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_linear_valid_with_max_boundary") + .first_error() + .is_none()); + } + } + mod polynomial { + use super::*; + #[test] + fn test_polynomial_valid() { + // f(x) = (2 * x^(2/3)) / 10 + 5 + let dist = 
DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 3, + o: 0, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(80), + }; + let result = dist.validate(START_MOMENT); + + match &result { + Ok(validation_result) => { + if let Some(error) = validation_result.first_error() { + panic!("Test failed: validation error found: {:?}", error); + } + } + Err(protocol_error) => { + panic!("Test failed: ProtocolError: {:?}", protocol_error); + } + } + } + #[test] + fn test_polynomial_invalid_divide_by_zero() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 0, + m: 2, + n: 3, + o: 0, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_polynomial_invalid_divide_by_zero") + .first_error() + .is_some()); + } + + // 1. Test invalid: n is zero (division by zero in exponent) + #[test] + fn test_polynomial_invalid_n_zero() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 0, // Invalid: n == 0 + o: 0, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an error when n is zero" + ); + } + + // 2. Test invalid: shift parameter s exceeds MAX_DISTRIBUTION_PARAM. + #[test] + fn test_polynomial_invalid_s_exceeds_max() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 3, + o: 0, + s: Some(MAX_DISTRIBUTION_PARAM + 1), // Invalid: s too large + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an error when s exceeds MAX_DISTRIBUTION_PARAM" + ); + } + + // 3. Test invalid: offset o is too high. 
+ #[test] + fn test_polynomial_invalid_o_too_high() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 3, + o: MAX_DISTRIBUTION_PARAM as i64 + 1, // Invalid: o too high + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an error when o is above the allowed maximum" + ); + } + + // 4. Test invalid: offset o is too low. + #[test] + fn test_polynomial_invalid_o_too_low() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 3, + o: -(MAX_DISTRIBUTION_PARAM as i64) - 1, // Invalid: o too low + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an error when o is below the allowed minimum" + ); + } + + // 5. Test invalid: max_value exceeds MAX_DISTRIBUTION_PARAM. + #[test] + fn test_polynomial_invalid_max_exceeds_max_distribution() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 3, + o: 0, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(MAX_DISTRIBUTION_PARAM + 1), // Invalid: max_value too high + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an error when max_value exceeds MAX_DISTRIBUTION_PARAM" + ); + } + + // 6. Test invalid: min_value is greater than max_value. 
+ #[test] + fn test_polynomial_invalid_min_greater_than_max() { + let dist = DistributionFunction::Polynomial { + a: 2, + d: 10, + m: 2, + n: 3, + o: 0, + s: Some(0), + b: 5, + min_value: Some(60), // min_value > max_value + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an error when min_value is greater than max_value" + ); + } + + // 7. Test invalid: For an increasing polynomial function, the starting value equals max_value. + #[test] + fn test_polynomial_invalid_starting_at_max_for_increasing() { + // For an increasing function (a > 0, m > 0) evaluated at x = s, + // the result is b. If b equals max_value, then the function starts at the maximum. + let dist = DistributionFunction::Polynomial { + a: 2, // positive + d: 10, + m: 2, // positive + n: 1, + o: 0, + s: Some(0), + b: 100, // f(0) = 100 + min_value: Some(1), + max_value: Some(100), // Starting at max_value + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an incoherence error when an increasing function starts at max_value" + ); + } + + // 8. Test invalid: For a decreasing polynomial function, the starting value equals min_value. + #[test] + fn test_polynomial_invalid_starting_at_min_for_decreasing() { + // For a decreasing function (a < 0, m > 0 so that a*m < 0), + // evaluated at x = s, the result is b. If b equals min_value, then it's invalid. + let dist = DistributionFunction::Polynomial { + a: -2, // negative + d: 10, + m: 2, // positive => a*m is negative (decreasing) + n: 1, + o: 0, + s: Some(0), + b: 50, // f(0) = 50 + min_value: Some(50), // Starting at min_value + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected an incoherence error when a decreasing function starts at min_value" + ); + } + + // 9. 
Test valid: Polynomial with no min_value or max_value provided. + #[test] + fn test_polynomial_valid_no_boundaries() { + let dist = DistributionFunction::Polynomial { + a: 3, + d: 10, + m: 2, + n: 1, + o: 0, + s: Some(0), + b: 20, + min_value: None, + max_value: None, + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected valid").first_error().is_none(), + "Expected no validation errors when boundaries are omitted" + ); + } + + // 10. Test valid: Polynomial with fractional exponent (m/n = 3/2). + #[test] + fn test_polynomial_valid_fractional() { + // f(x) = (a * (x - s + o)^(m/n)) / d + b. + // Here: a = 1, d = 1, m = 3, n = 2, s = 0, o = 0, b = 0. + // So f(4) = 4^(3/2) = 8. + let dist = DistributionFunction::Polynomial { + a: 1, + d: 1, + m: 3, + n: 2, + o: 0, + s: Some(0), + b: 0, + min_value: Some(0), + max_value: Some(100), + }; + let eval_result = dist.evaluate(4); + assert_eq!( + eval_result.unwrap(), + 8, + "Expected f(4) to be 8 for a fractional exponent of 3/2" + ); + let validation_result = dist.validate(4); + assert!( + validation_result + .expect("expected valid") + .first_error() + .is_none(), + "Expected no validation errors for a properly configured fractional exponent function" + ); + } + } + mod exp { + use super::*; + #[test] + fn test_exponential_valid() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 10, + m: 1, + n: 2, + o: -3999, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(1000000), + }; + let result = dist.validate(START_MOMENT); + if let Err(err) = &result { + panic!("Test failed: unexpected error: {:?}", err); + } + if let Some(error) = result + .expect("no error on test_exponential_valid") + .first_error() + { + panic!("Test failed: validation error: {:?}", error); + } + } + + #[test] + fn test_exponential_invalid_zero_n() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 10, + m: 1, + n: 0, + o: 1, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: 
Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_exponential_invalid_zero_n") + .first_error() + .is_some()); + } + + #[test] + fn test_exponential_invalid_zero_m() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 10, + m: 0, // Invalid: `m` should not be zero + n: 2, + o: 1, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_zero_m") + .first_error() + .is_some(), + "Expected error: m should not be zero" + ); + } + + #[test] + fn test_exponential_invalid_zero_a() { + let dist = DistributionFunction::Exponential { + a: 0, // Invalid: `a` cannot be zero + d: 10, + m: 1, + n: 2, + o: 1, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_zero_a") + .first_error() + .is_some(), + "Expected error: a cannot be zero" + ); + } + + #[test] + fn test_exponential_invalid_max_missing_when_m_positive() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 10, + m: 1, // `m > 0`, so `max_value` must be set + n: 2, + o: 1, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: None, // Invalid: max_value must be set + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_max_missing_when_m_positive") + .first_error() + .is_some(), + "Expected error: max_value must be set when m > 0" + ); + } + + #[test] + fn test_exponential_invalid_o_too_large() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 10, + m: 1, + n: 2, + o: MAX_DISTRIBUTION_PARAM as i64 + 1, // Invalid: `o` exceeds allowed range + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on 
test_exponential_invalid_o_too_large") + .first_error() + .is_some(), + "Expected error: o exceeds MAX_DISTRIBUTION_PARAM" + ); + } + + #[test] + fn test_exponential_invalid_min_greater_than_max() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 10, + m: -1, + n: 2, + o: 1, + s: Some(0), + c: 10, + min_value: Some(50), // Invalid: min > max + max_value: Some(30), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_min_greater_than_max") + .first_error() + .is_some(), + "Expected error: min_value > max_value" + ); + } + + #[test] + fn test_exponential_valid_with_negative_m() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 5, + m: -2, // Valid: Decay function (exponential decrease) + n: 4, + o: 2, + s: Some(START_MOMENT), + c: 8, + min_value: Some(2), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_exponential_valid_with_negative_m") + .first_error() + .is_none()); + } + + #[test] + fn test_exponential_valid_with_max_boundary() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 5, + m: 2, + n: 4, + o: 1, + s: Some(START_MOMENT), + c: 8, + min_value: Some(2), + max_value: Some(MAX_DISTRIBUTION_PARAM), // Valid max + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_exponential_valid_with_max_boundary") + .first_error() + .is_none()); + } + + #[test] + fn test_exponential_invalid_large_start_token_amount() { + let dist = DistributionFunction::Exponential { + a: MAX_DISTRIBUTION_PARAM, + d: 1, + m: 1, + n: 1, + o: 1, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(MAX_DISTRIBUTION_PARAM), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_large_start_token_amount") + .first_error() + .is_some(), + "Expected error: start_token_amount exceeds allowed range" + ); + } + + 
#[test] + fn test_exponential_invalid_a_too_large_for_max() { + let dist = DistributionFunction::Exponential { + a: MAX_DISTRIBUTION_PARAM, // Large `a` + d: 10, + m: 2, // Increasing + n: 1, + o: 0, + s: Some(0), + c: 10, + min_value: Some(1), + max_value: Some(1000), // Small `max_value` + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_a_too_large_for_max") + .first_error() + .is_some(), + "Expected error: `a` is too large, leading to immediate max_value" + ); + } + + #[test] + fn test_exponential_invalid_starts_at_min() { + let dist = DistributionFunction::Exponential { + a: 5, + d: 10, + m: -3, // Decreasing + n: 2, + o: 0, + s: Some(0), + c: 10, + min_value: Some(10), // Function starts at `min_value` + max_value: Some(1000), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_starts_at_min") + .first_error() + .is_some(), + "Expected IncoherenceError: function starts at `min_value`" + ); + } + + #[test] + fn test_exponential_invalid_missing_max_for_positive_m() { + let dist = DistributionFunction::Exponential { + a: 2, + d: 10, + m: 3, // Increasing + n: 2, + o: 1, + s: Some(0), + c: 5, + min_value: Some(1), + max_value: None, // Should fail + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_missing_max_for_positive_m") + .first_error() + .is_some(), + "Expected error: missing `max_value` when `m > 0`" + ); + } + + #[test] + fn test_exponential_invalid_large_o_overflow() { + let dist = DistributionFunction::Exponential { + a: 2, + d: 10, + m: 1, + n: 1, + o: i64::MAX / 2, // Large `o` + s: Some(0), + c: 5, + min_value: Some(1), + max_value: Some(MAX_DISTRIBUTION_PARAM), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_large_o_overflow") + .first_error() + .is_some(), + "Expected error: `o` is too 
large and causes overflow" + ); + } + + #[test] + fn test_exponential_invalid_a_too_small() { + let dist = DistributionFunction::Exponential { + a: 1, // Tiny `a` + d: 10, + m: -2, // Decreasing + n: 2, + o: 0, + s: Some(0), + c: 10, + min_value: Some(10), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_exponential_invalid_a_too_small") + .first_error() + .is_some(), + "Expected error: `a` is too small to make meaningful changes" + ); + } + + #[test] + fn test_exponential_valid_slow_increase() { + let dist = DistributionFunction::Exponential { + a: 1, + d: 50, + m: 1, // Small positive `m` + n: 10, + o: -3, + s: Some(0), + c: 5, + min_value: Some(10), + max_value: Some(1000), + }; + + let result = dist.validate(5); + + match result { + Ok(validation_result) => { + if let Some(error) = validation_result.first_error() { + panic!("Test failed: Expected no error, but got: {:?}", error); + } + } + Err(protocol_error) => { + panic!( + "Test failed: Expected validation success, but got ProtocolError: {:?}", + protocol_error + ); + } + } + } + + #[test] + fn test_exponential_valid_gentle_decay() { + let dist = DistributionFunction::Exponential { + a: 3, + d: 15, + m: -1, // Small negative `m` + n: 4, + o: 2, + s: Some(START_MOMENT), + c: 8, + min_value: Some(5), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_exponential_valid_gentle_decay") + .first_error() + .is_none()); + } + + #[test] + fn test_exponential_valid_negative_m_with_o_offset() { + let dist = DistributionFunction::Exponential { + a: 5, + d: 8, + m: -2, // Decreasing + n: 3, + o: 5, // Shift start + s: Some(START_MOMENT), + c: 10, + min_value: Some(5), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_exponential_valid_negative_m_with_o_offset") + .first_error() + .is_none()); + } + } + mod log { + use 
super::*; + #[test] + fn test_logarithmic_valid() { + let dist = DistributionFunction::Logarithmic { + a: 4, + d: 10, + m: 1, + n: 2, + o: 1, + s: None, + b: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_logarithmic_valid") + .first_error() + .is_none()); + } + + #[test] + fn test_logarithmic_invalid_zero_d() { + let dist = DistributionFunction::Logarithmic { + a: 4, + d: 0, // Invalid: Division by zero + m: 1, + n: 2, + o: 1, + s: Some(0), + b: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_logarithmic_invalid_zero_d") + .first_error() + .is_some(), + "Expected division by zero error" + ); + } + + #[test] + fn test_logarithmic_invalid_zero_n() { + let dist = DistributionFunction::Logarithmic { + a: 4, + d: 10, + m: 1, + n: 0, // Invalid: Division by zero in log denominator + o: 1, + s: Some(0), + b: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_logarithmic_invalid_zero_n") + .first_error() + .is_some(), + "Expected division by zero error" + ); + } + + #[test] + fn test_logarithmic_invalid_x_s_o_non_positive() { + let dist = DistributionFunction::Logarithmic { + a: 4, + d: 10, + m: 1, + n: 2, + o: -5, // Causes (x - s + o) to be <= 0 + s: Some(START_MOMENT), + b: 10, + min_value: Some(1), + max_value: Some(100), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_logarithmic_invalid_x_s_o_non_positive") + .first_error() + .is_some(), + "Expected error: (x - s + o) must be > 0" + ); + } + + #[test] + fn test_logarithmic_invalid_max_greater_than_max_param() { + let dist = DistributionFunction::Logarithmic { + a: 4, + d: 10, + m: 1, + n: 2, + o: 1, + s: Some(0), + b: 10, + min_value: Some(1), + max_value: 
Some(MAX_DISTRIBUTION_PARAM + 1), // Invalid: max_value too large + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_logarithmic_invalid_max_greater_than_max_param") + .first_error() + .is_some(), + "Expected error: max_value exceeds allowed max distribution parameter" + ); + } + + #[test] + fn test_logarithmic_invalid_min_greater_than_max() { + let dist = DistributionFunction::Logarithmic { + a: 4, + d: 10, + m: 1, + n: 2, + o: 1, + s: Some(0), + b: 10, + min_value: Some(50), // Invalid: min > max + max_value: Some(30), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on test_logarithmic_invalid_min_greater_than_max") + .first_error() + .is_some(), + "Expected error: min_value > max_value" + ); + } + + #[test] + fn test_logarithmic_valid_with_s_and_o() { + let dist = DistributionFunction::Logarithmic { + a: 3, + d: 5, + m: 2, + n: 4, + o: 3, // Offset ensures (x - s + o) > 0 + s: Some(START_MOMENT - 2), + b: 8, + min_value: Some(2), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_logarithmic_valid_with_s_and_o") + .first_error() + .is_none()); + } + + #[test] + fn test_logarithmic_valid_edge_case_max() { + let dist = DistributionFunction::Logarithmic { + a: 3, + d: 5, + m: 2, + n: 4, + o: 1, + s: Some(START_MOMENT), + b: 8, + min_value: Some(2), + max_value: Some(MAX_DISTRIBUTION_PARAM), // Valid max + }; + let result = dist.validate(START_MOMENT); + assert!(result + .expect("no error on test_logarithmic_valid_edge_case_max") + .first_error() + .is_none()); + } + } + mod inverted_log { + use super::*; + #[test] + fn test_inverted_logarithmic_valid() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result + .expect("no error on 
test_inverted_logarithmic_valid") + .first_error() + .is_none(), + "Expected valid inverted logarithmic function" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_divide_by_zero_d() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 0, // Invalid: d = 0 causes division by zero + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: division by zero (d = 0)" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_zero_n() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 0, // Invalid: n = 0 causes division by zero in log argument + o: 1, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: division by zero (n = 0)" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_zero_m() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 0, // Invalid: m = 0 causes invalid log argument + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: division by zero (m = 0)" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_negative_log_argument() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: -10, // Causes log argument to be non-positive + s: Some(START_MOMENT), + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: log argument must be positive" + ); + } + + #[test] + fn 
test_inverted_logarithmic_invalid_exceeds_max_distribution_param() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(MAX_DISTRIBUTION_PARAM + 1), // Invalid: s exceeds max + b: 5, + min_value: Some(1), + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: s exceeds MAX_DISTRIBUTION_PARAM" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_min_greater_than_max() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(60), // Invalid: min > max + max_value: Some(50), + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: min_value > max_value" + ); + } + + #[test] + fn test_inverted_logarithmic_valid_with_max_boundary() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 5, + min_value: Some(1), + max_value: Some(MAX_DISTRIBUTION_PARAM), // Valid max boundary + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected valid").first_error().is_none(), + "Expected valid function with max boundary" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_starting_at_max_for_increasing() { + let dist = DistributionFunction::InvertedLogarithmic { + a: -10, // Increasing function + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 50, // Starts at max_value + min_value: Some(1), + max_value: Some(50), // Function already at max + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: increasing function starts at max_value" + ); + } + + #[test] + fn test_inverted_logarithmic_invalid_starting_at_min_for_decreasing() { + let dist = 
DistributionFunction::InvertedLogarithmic { + a: 10, // Decreasing function + d: 1, + m: 1, + n: 100, + o: 1, + s: Some(0), + b: 1, // Starts at min_value + min_value: Some(1), + max_value: Some(50), // Function already at min + }; + let result = dist.validate(START_MOMENT); + assert!( + result.expect("expected error").first_error().is_some(), + "Expected error: decreasing function starts at min_value" + ); + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_recipient.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_recipient.rs index 1244588c6ae..4fb132a29a8 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_recipient.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/distribution_recipient.rs @@ -1,11 +1,31 @@ +use crate::data_contract::associated_token::token_distribution_key::{ + TokenDistributionType, TokenDistributionTypeWithResolvedRecipient, +}; +use crate::errors::ProtocolError; use bincode::{Decode, Encode}; +use platform_serialization_derive::PlatformSerialize; use platform_value::Identifier; use serde::{Deserialize, Serialize}; use std::fmt; -#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, Copy, PartialEq, Eq, PartialOrd)] +#[derive( + Serialize, + Deserialize, + Decode, + Encode, + PlatformSerialize, + Debug, + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Default, +)] +#[platform_serialize(unversioned)] pub enum TokenDistributionRecipient { /// Distribute to the contract Owner + #[default] ContractOwner, /// Distribute to a single identity Identity(Identifier), @@ -14,6 +34,121 @@ pub enum TokenDistributionRecipient { EvonodesByParticipation, } +impl TokenDistributionRecipient { + /// Simple resolve matches the contract owner but does not try to resolve the evonodes + pub fn simple_resolve_with_distribution_type( + &self, + 
owner_id: Identifier, + distribution_type: TokenDistributionType, + ) -> Result { + match distribution_type { + TokenDistributionType::PreProgrammed => match self { + TokenDistributionRecipient::ContractOwner => Ok( + TokenDistributionTypeWithResolvedRecipient::PreProgrammed(owner_id), + ), + TokenDistributionRecipient::Identity(identity) => Ok( + TokenDistributionTypeWithResolvedRecipient::PreProgrammed(*identity), + ), + TokenDistributionRecipient::EvonodesByParticipation => { + Err(ProtocolError::NotSupported( + "trying to simple resolve for pre-programmed evonode distribution" + .to_string(), + )) + } + }, + TokenDistributionType::Perpetual => match self { + TokenDistributionRecipient::ContractOwner => { + Ok(TokenDistributionTypeWithResolvedRecipient::Perpetual( + TokenDistributionResolvedRecipient::ContractOwnerIdentity(owner_id), + )) + } + TokenDistributionRecipient::Identity(identity) => { + Ok(TokenDistributionTypeWithResolvedRecipient::Perpetual( + TokenDistributionResolvedRecipient::Identity(*identity), + )) + } + TokenDistributionRecipient::EvonodesByParticipation => { + Ok(TokenDistributionTypeWithResolvedRecipient::Perpetual( + TokenDistributionResolvedRecipient::Evonode(owner_id), + )) + } + }, + } + } +} + +pub type TokenDistributionWeight = u64; + +// #[derive( +// Serialize, +// Deserialize, +// Decode, +// Encode, +// Debug, +// Clone, +// PartialEq, +// Eq, +// PartialOrd, +// )] +// pub struct EpochProposedBlocks { +// pub block_count: u64, +// pub total_blocks: u64, +// } + +#[derive( + Serialize, + Deserialize, + Decode, + Encode, + PlatformSerialize, + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, +)] +#[platform_serialize(unversioned)] +pub enum TokenDistributionResolvedRecipient { + /// Distribute to a single identity + ContractOwnerIdentity(Identifier), + /// Distribute to a single identity + Identity(Identifier), + /// A single Evonode recipient that should share the token reward + Evonode(Identifier), +} + +impl From for 
TokenDistributionRecipient { + fn from(value: TokenDistributionResolvedRecipient) -> Self { + match value { + TokenDistributionResolvedRecipient::ContractOwnerIdentity(_) => { + TokenDistributionRecipient::ContractOwner + } + TokenDistributionResolvedRecipient::Identity(identifier) => { + TokenDistributionRecipient::Identity(identifier) + } + TokenDistributionResolvedRecipient::Evonode(_) => { + TokenDistributionRecipient::EvonodesByParticipation + } + } + } +} + +impl From<&TokenDistributionResolvedRecipient> for TokenDistributionRecipient { + fn from(value: &TokenDistributionResolvedRecipient) -> Self { + match value { + TokenDistributionResolvedRecipient::ContractOwnerIdentity(_) => { + TokenDistributionRecipient::ContractOwner + } + TokenDistributionResolvedRecipient::Identity(identifier) => { + TokenDistributionRecipient::Identity(*identifier) + } + TokenDistributionResolvedRecipient::Evonode(_) => { + TokenDistributionRecipient::EvonodesByParticipation + } + } + } +} + impl fmt::Display for TokenDistributionRecipient { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -29,3 +164,20 @@ impl fmt::Display for TokenDistributionRecipient { } } } + +/// Implements `Display` for `TokenDistributionResolvedRecipient` +impl fmt::Display for TokenDistributionResolvedRecipient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TokenDistributionResolvedRecipient::ContractOwnerIdentity(id) => { + write!(f, "ContractOwnerIdentity({})", id) + } + TokenDistributionResolvedRecipient::Identity(id) => { + write!(f, "Identity({})", id) + } + TokenDistributionResolvedRecipient::Evonode(id) => { + write!(f, "Evonode({})", id) + } + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/mod.rs index 758ec8b3881..ec003f69901 100644 --- 
a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/mod.rs @@ -9,6 +9,7 @@ use std::fmt; pub mod distribution_function; pub mod distribution_recipient; pub mod methods; +pub mod reward_distribution_moment; pub mod reward_distribution_type; pub mod v0; diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_moment/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_moment/mod.rs new file mode 100644 index 00000000000..a4dff02e160 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_moment/mod.rs @@ -0,0 +1,216 @@ +use crate::block::epoch::EpochIndex; +use crate::prelude::{BlockHeight, TimestampMillis}; +use bincode::{Decode, Encode}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::ops::Add; +use crate::block::block_info::BlockInfo; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; +use crate::ProtocolError; + +#[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, Copy, PartialEq, Eq, PartialOrd)] +pub enum RewardDistributionMoment { + /// The reward was distributed at a block height + BlockBasedMoment(BlockHeight), + /// The reward was distributed at a time + TimeBasedMoment(TimestampMillis), + /// The reward was distributed at an epoch + EpochBasedMoment(EpochIndex), +} + +impl RewardDistributionMoment { + /// Checks if two `RewardDistributionMoment`s are of the same type. 
+ pub fn same_type(&self, other: &Self) -> bool { + matches!( + (self, other), + (Self::BlockBasedMoment(_), Self::BlockBasedMoment(_)) + | (Self::TimeBasedMoment(_), Self::TimeBasedMoment(_)) + | (Self::EpochBasedMoment(_), Self::EpochBasedMoment(_)) + ) + } + + /// Converts a `RewardDistributionMoment` into a `u64` representation. + /// + /// # Returns + /// - The underlying numerical value of the moment as a `u64`. + pub fn to_u64(&self) -> u64 { + match self { + RewardDistributionMoment::BlockBasedMoment(height) => *height as u64, + RewardDistributionMoment::TimeBasedMoment(timestamp) => *timestamp, + RewardDistributionMoment::EpochBasedMoment(epoch) => *epoch as u64, + } + } +} + +impl From for u64 { + /// Converts a `RewardDistributionMoment` into a `u64`. + /// + /// This conversion preserves the underlying numerical value. + fn from(moment: RewardDistributionMoment) -> Self { + moment.to_u64() + } +} +impl Add for RewardDistributionMoment { + type Output = Result; + + fn add(self, rhs: Self) -> Self::Output { + match (self, rhs) { + ( + RewardDistributionMoment::BlockBasedMoment(a), + RewardDistributionMoment::BlockBasedMoment(b), + ) => a + .checked_add(b) + .map(RewardDistributionMoment::BlockBasedMoment) + .ok_or(ProtocolError::Overflow("Block height addition overflow")), + ( + RewardDistributionMoment::TimeBasedMoment(a), + RewardDistributionMoment::TimeBasedMoment(b), + ) => a + .checked_add(b) + .map(RewardDistributionMoment::TimeBasedMoment) + .ok_or(ProtocolError::Overflow("Timestamp addition overflow")), + ( + RewardDistributionMoment::EpochBasedMoment(a), + RewardDistributionMoment::EpochBasedMoment(b), + ) => a + .checked_add(b) + .map(RewardDistributionMoment::EpochBasedMoment) + .ok_or(ProtocolError::Overflow("Epoch index addition overflow")), + _ => Err(ProtocolError::AddingDifferentTypes( + "Cannot add different types of RewardDistributionMoment".to_string(), + )), + } + } +} + +impl PartialEq<&u64> for RewardDistributionMoment { + fn 
eq(&self, other: &&u64) -> bool { + match self { + RewardDistributionMoment::BlockBasedMoment(value) => value == *other, + RewardDistributionMoment::TimeBasedMoment(value) => value == *other, + RewardDistributionMoment::EpochBasedMoment(value) => { + if **other > u16::MAX as u64 { + false + } else { + value == &(**other as u16) + } + } + } + } +} + +impl PartialEq for RewardDistributionMoment { + fn eq(&self, other: &u64) -> bool { + self == &other + } +} + +impl PartialEq<&u32> for RewardDistributionMoment { + fn eq(&self, other: &&u32) -> bool { + match self { + RewardDistributionMoment::BlockBasedMoment(value) => *value as u32 == **other, + RewardDistributionMoment::TimeBasedMoment(value) => *value as u32 == **other, + RewardDistributionMoment::EpochBasedMoment(value) => *value as u32 == **other, + } + } +} + +impl PartialEq for RewardDistributionMoment { + fn eq(&self, other: &u32) -> bool { + self == &other + } +} + +impl PartialEq<&u16> for RewardDistributionMoment { + fn eq(&self, other: &&u16) -> bool { + match self { + RewardDistributionMoment::BlockBasedMoment(value) => *value as u16 == **other, + RewardDistributionMoment::TimeBasedMoment(value) => *value as u16 == **other, + RewardDistributionMoment::EpochBasedMoment(value) => *value == **other, + } + } +} + +impl PartialEq for RewardDistributionMoment { + fn eq(&self, other: &u16) -> bool { + self == &other + } +} + +impl PartialEq<&usize> for RewardDistributionMoment { + fn eq(&self, other: &&usize) -> bool { + match self { + RewardDistributionMoment::BlockBasedMoment(value) => *value as usize == **other, + RewardDistributionMoment::TimeBasedMoment(value) => *value as usize == **other, + RewardDistributionMoment::EpochBasedMoment(value) => *value as usize == **other, + } + } +} + +impl PartialEq for RewardDistributionMoment { + fn eq(&self, other: &usize) -> bool { + self == &other + } +} + +impl RewardDistributionMoment { + /// Converts a reference to `BlockInfo` and a `RewardDistributionType` into a 
`RewardDistributionMoment`. + /// + /// This determines the appropriate `RewardDistributionMoment` based on the type of + /// `RewardDistributionType`. The function selects: + /// - **Block height** for block-based distributions. + /// - **Timestamp (milliseconds)** for time-based distributions. + /// - **Epoch index** for epoch-based distributions. + /// + /// # Arguments + /// + /// * `block_info` - A reference to the `BlockInfo` struct containing blockchain state details. + /// * `distribution_type` - The `RewardDistributionType` to determine which moment should be used. + /// + /// # Returns + /// + /// Returns a `RewardDistributionMoment` corresponding to the type of distribution. + pub fn from_block_info( + block_info: &BlockInfo, + distribution_type: &RewardDistributionType, + ) -> Self { + match distribution_type { + RewardDistributionType::BlockBasedDistribution { .. } => { + RewardDistributionMoment::BlockBasedMoment(block_info.height) + } + RewardDistributionType::TimeBasedDistribution { .. } => { + RewardDistributionMoment::TimeBasedMoment(block_info.time_ms) + } + RewardDistributionType::EpochBasedDistribution { .. 
} => { + RewardDistributionMoment::EpochBasedMoment(block_info.epoch.index) + } + } + } +} + +impl RewardDistributionMoment { + pub fn to_be_bytes_vec(&self) -> Vec { + match self { + RewardDistributionMoment::BlockBasedMoment(height) => height.to_be_bytes().to_vec(), + RewardDistributionMoment::TimeBasedMoment(time) => time.to_be_bytes().to_vec(), + RewardDistributionMoment::EpochBasedMoment(epoch) => epoch.to_be_bytes().to_vec(), + } + } +} + +/// Implements `Display` for `RewardDistributionMoment` +impl fmt::Display for RewardDistributionMoment { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RewardDistributionMoment::BlockBasedMoment(height) => { + write!(f, "BlockBasedMoment({})", height) + } + RewardDistributionMoment::TimeBasedMoment(timestamp) => { + write!(f, "TimeBasedMoment({})", timestamp) + } + RewardDistributionMoment::EpochBasedMoment(epoch) => { + write!(f, "EpochBasedMoment({})", epoch) + } + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/accessors.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/accessors.rs new file mode 100644 index 00000000000..87a6c83fe63 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/accessors.rs @@ -0,0 +1,76 @@ +use crate::balances::credits::TokenAmount; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; + +impl RewardDistributionType { + /// Returns the interval of the distribution. 
+ /// + /// # Returns + /// - `BlockHeightInterval`, `TimestampMillisInterval`, or `EpochInterval`, depending on the variant. + pub fn interval(&self) -> RewardDistributionMoment { + match self { + RewardDistributionType::BlockBasedDistribution { interval, .. } => { + RewardDistributionMoment::BlockBasedMoment(*interval) + } + RewardDistributionType::TimeBasedDistribution { interval, .. } => { + RewardDistributionMoment::TimeBasedMoment(*interval) + } + RewardDistributionType::EpochBasedDistribution { interval, .. } => { + RewardDistributionMoment::EpochBasedMoment(*interval) + } + } + } + + /// Returns the function defining the emission behavior. + /// + /// # Returns + /// - `&DistributionFunction`: The function used for emission calculation. + pub fn function(&self) -> &DistributionFunction { + match self { + RewardDistributionType::BlockBasedDistribution { function, .. } => function, + RewardDistributionType::TimeBasedDistribution { function, .. } => function, + RewardDistributionType::EpochBasedDistribution { function, .. } => function, + } + } + + /// Returns the optional start moment of the distribution. + /// + /// # Returns + /// - `Some(RewardDistributionMoment::BlockBasedMoment)`, `Some(RewardDistributionMoment::TimeBasedMoment)`, + /// or `Some(RewardDistributionMoment::EpochBasedMoment)`, depending on the distribution type. + /// - `None` if the start moment is not set. + pub fn start(&self) -> Option { + match self { + RewardDistributionType::BlockBasedDistribution { start, .. } => { + start.map(RewardDistributionMoment::BlockBasedMoment) + } + RewardDistributionType::TimeBasedDistribution { start, .. } => { + start.map(RewardDistributionMoment::TimeBasedMoment) + } + RewardDistributionType::EpochBasedDistribution { start, .. } => { + start.map(RewardDistributionMoment::EpochBasedMoment) + } + } + } + + /// Returns the optional end moment of the distribution. 
+ /// + /// # Returns + /// - `Some(RewardDistributionMoment::BlockBasedMoment)`, `Some(RewardDistributionMoment::TimeBasedMoment)`, + /// or `Some(RewardDistributionMoment::EpochBasedMoment)`, depending on the distribution type. + /// - `None` if the end moment is not set. + pub fn end(&self) -> Option { + match self { + RewardDistributionType::BlockBasedDistribution { end, .. } => { + end.map(RewardDistributionMoment::BlockBasedMoment) + } + RewardDistributionType::TimeBasedDistribution { end, .. } => { + end.map(RewardDistributionMoment::TimeBasedMoment) + } + RewardDistributionType::EpochBasedDistribution { end, .. } => { + end.map(RewardDistributionMoment::EpochBasedMoment) + } + } + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/evaluate_interval.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/evaluate_interval.rs new file mode 100644 index 00000000000..7f52fe80841 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/evaluate_interval.rs @@ -0,0 +1,40 @@ +use crate::balances::credits::TokenAmount; +use crate::block::block_info::BlockInfo; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; +use crate::ProtocolError; + +impl RewardDistributionType { + /// Computes the total rewards emitted in a given interval based on the provided distribution moments. + /// + /// This function determines the emission amounts within the range from `start_at_excluded` (exclusive) + /// up to `end_at_moment_included` (inclusive). The evaluation depends on the specific type of + /// distribution (Block-Based, Time-Based, or Epoch-Based) and the associated interval. 
+ /// + /// # Parameters + /// + /// - `start_at_moment_excluded` (`RewardDistributionMoment`): + /// The last known point after which rewards should be counted (exclusive). + /// - `end_at_moment_included` (`RewardDistributionMoment`): + /// The latest point up to which rewards should be counted (inclusive). + /// + /// # Returns + /// + /// - `Ok(TokenAmount)`: The total sum of emitted rewards in the interval. + /// - `Err(ProtocolError)`: If any evaluation fails (e.g., overflow, invalid configuration). + /// + pub fn rewards_in_interval( + &self, + start_at_moment_excluded: RewardDistributionMoment, + block_info: &BlockInfo, + ) -> Result { + let end_reward_moment = RewardDistributionMoment::from_block_info(block_info, self); + self.function().evaluate_interval_in_bounds( + start_at_moment_excluded, + self.interval(), + end_reward_moment, + self.start(), + self.end(), + ) + } +} diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/mod.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/mod.rs index c95f8b0d0af..5fa6c3e9f8b 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/mod.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/reward_distribution_type/mod.rs @@ -1,43 +1,200 @@ -use crate::balances::credits::TokenAmount; +mod accessors; +mod evaluate_interval; + +use crate::block::epoch::EpochIndex; use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; -use crate::prelude::{BlockHeightInterval, EpochInterval, TimestampMillisInterval}; +use crate::prelude::{BlockHeight, BlockHeightInterval, DataContract, EpochInterval, TimestampMillis, TimestampMillisInterval}; use bincode::{Decode, Encode}; use serde::{Deserialize, Serialize}; use std::fmt; +use 
crate::data_contract::accessors::v1::DataContractV1Getters; +use crate::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::ProtocolError; #[derive(Serialize, Deserialize, Decode, Encode, Debug, Clone, PartialEq, Eq, PartialOrd)] pub enum RewardDistributionType { /// An amount of tokens is emitted every n blocks - BlockBasedDistribution(BlockHeightInterval, TokenAmount, DistributionFunction), + /// The start and end are included if set + BlockBasedDistribution { + interval: BlockHeightInterval, + function: DistributionFunction, + start: Option, + end: Option, + }, /// An amount of tokens is emitted every amount of time given - TimeBasedDistribution(TimestampMillisInterval, TokenAmount, DistributionFunction), + /// The start and end are included if set + TimeBasedDistribution { + interval: TimestampMillisInterval, + function: DistributionFunction, + start: Option, + end: Option, + }, /// An amount of tokens is emitted every amount of epochs - EpochBasedDistribution(EpochInterval, TokenAmount, DistributionFunction), + /// The start and end are included if set + EpochBasedDistribution { + interval: EpochInterval, + function: DistributionFunction, + start: Option, + end: Option, + }, } +impl RewardDistributionType { + /// Determines the starting moment of reward distribution based on the contract creation time. + /// + /// This function returns the appropriate `RewardDistributionMoment`, which represents when + /// a reward distribution should begin, based on the type of distribution and when the + /// `DataContract` was created. + /// + /// # Arguments + /// + /// * `data_contract` - A reference to the `DataContract`, which contains details about + /// when the contract was created in terms of block height, timestamp, and epoch index. + /// + /// # Returns + /// + /// * `Some(RewardDistributionMoment)` if the contract's creation time can be mapped to + /// a valid distribution start moment. 
+ /// * `None` if the contract creation time is unavailable or not applicable. + pub fn contract_creation_moment( + &self, + data_contract: &DataContract, + ) -> Option { + match self { + RewardDistributionType::BlockBasedDistribution { .. } => data_contract + .created_at_block_height() + .map(RewardDistributionMoment::BlockBasedMoment), + RewardDistributionType::TimeBasedDistribution { .. } => data_contract + .created_at() + .map(RewardDistributionMoment::TimeBasedMoment), + RewardDistributionType::EpochBasedDistribution { .. } => data_contract + .created_at_epoch() + .map(RewardDistributionMoment::EpochBasedMoment), + } + } + /// Converts a byte slice into the corresponding `RewardDistributionMoment` variant + /// based on the type of reward distribution. + /// + /// This method interprets the provided bytes according to the expected type of the distribution: + /// - `BlockBasedDistribution`: Interprets the bytes as a `BlockHeight` (`u64`). + /// - `TimeBasedDistribution`: Interprets the bytes as a `TimestampMillis` (`u64`). + /// - `EpochBasedDistribution`: Interprets the bytes as an `EpochIndex` (`u16`). + /// + /// # Parameters + /// + /// - `bytes`: A byte slice containing the serialized representation of the moment. + /// + /// # Returns + /// + /// - `Ok(RewardDistributionMoment)`: The successfully parsed reward distribution moment. + /// - `Err(ProtocolError)`: If the provided bytes are of incorrect length. + /// + /// # Errors + /// + /// - `ProtocolError::DecodingError`: If the provided bytes slice does not have the expected length + pub fn moment_from_bytes( + &self, + bytes: &[u8], + ) -> Result { + match self { + RewardDistributionType::BlockBasedDistribution { .. 
} => { + if bytes.len() != 8 { + return Err(ProtocolError::DecodingError( + "Expected 8 bytes for BlockBasedMoment".to_string(), + )); + } + let mut array = [0u8; 8]; + array.copy_from_slice(bytes); + Ok(RewardDistributionMoment::BlockBasedMoment( + u64::from_be_bytes(array), + )) + } + RewardDistributionType::TimeBasedDistribution { .. } => { + if bytes.len() != 8 { + return Err(ProtocolError::DecodingError( + "Expected 8 bytes for TimeBasedMoment".to_string(), + )); + } + let mut array = [0u8; 8]; + array.copy_from_slice(bytes); + Ok(RewardDistributionMoment::TimeBasedMoment( + u64::from_be_bytes(array), + )) + } + RewardDistributionType::EpochBasedDistribution { .. } => { + if bytes.len() != 2 { + return Err(ProtocolError::DecodingError( + "Expected 2 bytes for EpochBasedMoment".to_string(), + )); + } + let mut array = [0u8; 2]; + array.copy_from_slice(bytes); + Ok(RewardDistributionMoment::EpochBasedMoment( + u16::from_be_bytes(array), + )) + } + } + } +} impl fmt::Display for RewardDistributionType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - RewardDistributionType::BlockBasedDistribution(interval, amount, function) => { + RewardDistributionType::BlockBasedDistribution { + interval, + function, + start, + end, + } => { write!( f, - "BlockBasedDistribution: {} tokens every {} blocks using {}", - amount, interval, function - ) + "BlockBasedDistribution: every {} blocks using {}", + interval, function + )?; + if let Some(start) = start { + write!(f, ", starting at block {}", start)?; + } + if let Some(end) = end { + write!(f, ", ending at block {}", end)?; + } + Ok(()) } - RewardDistributionType::TimeBasedDistribution(interval, amount, function) => { + RewardDistributionType::TimeBasedDistribution { + interval, + function, + start, + end, + } => { write!( f, - "TimeBasedDistribution: {} tokens every {} milliseconds using {}", - amount, interval, function - ) + "TimeBasedDistribution: every {} milliseconds using {}", + interval, 
function + )?; + if let Some(start) = start { + write!(f, ", starting at timestamp {}", start)?; + } + if let Some(end) = end { + write!(f, ", ending at timestamp {}", end)?; + } + Ok(()) } - RewardDistributionType::EpochBasedDistribution(interval, amount, function) => { + RewardDistributionType::EpochBasedDistribution { + interval, + function, + start, + end, + } => { write!( f, - "EpochBasedDistribution: {} tokens every {} epochs using {}", - amount, interval, function - ) + "EpochBasedDistribution: every {} epochs using {}", + interval, function + )?; + if let Some(start) = start { + write!(f, ", starting at epoch {}", start)?; + } + if let Some(end) = end { + write!(f, ", ending at epoch {}", end)?; + } + Ok(()) } } } diff --git a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/v0/methods.rs b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/v0/methods.rs index 9a321c593f2..e1e749390bb 100644 --- a/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/v0/methods.rs +++ b/packages/rs-dpp/src/data_contract/associated_token/token_perpetual_distribution/v0/methods.rs @@ -7,17 +7,17 @@ impl TokenPerpetualDistributionV0Methods for TokenPerpetualDistributionV0 { fn next_interval(&self, block_info: &BlockInfo) -> u64 { match self.distribution_type { // If the distribution is based on block height, return the next height where emissions occur. - RewardDistributionType::BlockBasedDistribution(interval, _, _) => { + RewardDistributionType::BlockBasedDistribution { interval, .. } => { (block_info.height - block_info.height % interval).saturating_add(interval) } // If the distribution is based on time, return the next timestamp in milliseconds. - RewardDistributionType::TimeBasedDistribution(interval, _, _) => { + RewardDistributionType::TimeBasedDistribution { interval, .. 
} => { (block_info.time_ms - block_info.time_ms % interval).saturating_add(interval) } // If the distribution is based on epochs, return the next epoch index. - RewardDistributionType::EpochBasedDistribution(interval, _, _) => { + RewardDistributionType::EpochBasedDistribution { interval, .. } => { (block_info.epoch.index - block_info.epoch.index % interval) .saturating_add(interval) as u64 } diff --git a/packages/rs-dpp/src/data_contract/document_type/index/mod.rs b/packages/rs-dpp/src/data_contract/document_type/index/mod.rs index 70bb184aa2f..3987591ba93 100644 --- a/packages/rs-dpp/src/data_contract/document_type/index/mod.rs +++ b/packages/rs-dpp/src/data_contract/document_type/index/mod.rs @@ -16,6 +16,7 @@ use crate::ProtocolError; use anyhow::anyhow; use crate::data_contract::document_type::ContestedIndexResolution::MasternodeVote; +#[cfg(feature = "validation")] use crate::data_contract::errors::DataContractError::RegexError; use platform_value::{Value, ValueMap}; use rand::distributions::{Alphanumeric, DistString}; diff --git a/packages/rs-dpp/src/data_contract/factory/v0/mod.rs b/packages/rs-dpp/src/data_contract/factory/v0/mod.rs index 0c07d25a614..635d90ec231 100644 --- a/packages/rs-dpp/src/data_contract/factory/v0/mod.rs +++ b/packages/rs-dpp/src/data_contract/factory/v0/mod.rs @@ -20,6 +20,7 @@ use crate::state_transition::data_contract_create_transition::DataContractCreate #[cfg(feature = "state-transitions")] use crate::state_transition::data_contract_update_transition::DataContractUpdateTransition; +#[cfg(feature = "data-contract-value-conversion")] use crate::data_contract::v1::DataContractV1; use crate::prelude::IdentityNonce; use crate::version::PlatformVersion; @@ -279,7 +280,6 @@ mod tests { assert_eq!(data_contract.schema_defs(), result.schema_defs()); assert_eq!(data_contract.document_schemas(), result.document_schemas()); assert_eq!(data_contract.owner_id(), result.owner_id()); - assert_eq!(data_contract.metadata(), result.metadata()); } 
#[tokio::test] @@ -302,7 +302,6 @@ mod tests { assert_eq!(data_contract.id(), result.id()); assert_eq!(data_contract.owner_id(), result.owner_id()); assert_eq!(data_contract.document_types(), result.document_types()); - assert_eq!(data_contract.metadata(), result.metadata()); } #[tokio::test] @@ -328,7 +327,6 @@ mod tests { assert_eq!(data_contract.id(), result.id()); assert_eq!(data_contract.owner_id(), result.owner_id()); assert_eq!(data_contract.document_types(), result.document_types()); - assert_eq!(data_contract.metadata(), result.metadata()); } #[test] diff --git a/packages/rs-dpp/src/data_contract/methods/equal_ignoring_time_based_fields/mod.rs b/packages/rs-dpp/src/data_contract/methods/equal_ignoring_time_based_fields/mod.rs new file mode 100644 index 00000000000..c7536ec0196 --- /dev/null +++ b/packages/rs-dpp/src/data_contract/methods/equal_ignoring_time_based_fields/mod.rs @@ -0,0 +1,42 @@ +use crate::data_contract::DataContract; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; + +mod v0; + +impl DataContract { + /// Compares two `DataContract` instances while ignoring time-related fields. + /// + /// This function checks for equality while excluding: + /// - `created_at` + /// - `updated_at` + /// - `created_at_block_height` + /// - `updated_at_block_height` + /// - `created_at_epoch` + /// - `updated_at_epoch` + /// + /// # Arguments + /// - `other`: A reference to another `DataContract` to compare against. + /// + /// # Returns + /// - `true` if all non-time fields match, otherwise `false`. 
+ pub fn equal_ignoring_time_fields( + &self, + other: &DataContract, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .dpp + .contract_versions + .methods + .equal_ignoring_time_fields + { + 0 => Ok(self.equal_ignoring_time_fields_v0(other)), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "DataContract::equal_ignoring_time_fields".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/data_contract/methods/equal_ignoring_time_based_fields/v0/mod.rs b/packages/rs-dpp/src/data_contract/methods/equal_ignoring_time_based_fields/v0/mod.rs new file mode 100644 index 00000000000..0343b11e28e --- /dev/null +++ b/packages/rs-dpp/src/data_contract/methods/equal_ignoring_time_based_fields/v0/mod.rs @@ -0,0 +1,30 @@ +use crate::data_contract::accessors::v0::DataContractV0Getters; +use crate::data_contract::accessors::v1::DataContractV1Getters; +use crate::data_contract::DataContract; + +impl DataContract { + /// Compares two `DataContract` instances while ignoring time-related fields. + /// + /// This function checks for equality while excluding: + /// - `created_at` + /// - `updated_at` + /// - `created_at_block_height` + /// - `updated_at_block_height` + /// - `created_at_epoch` + /// - `updated_at_epoch` + /// + /// # Arguments + /// - `other`: A reference to another `DataContract` to compare against. + /// + /// # Returns + /// - `true` if all non-time fields match, otherwise `false`. 
+ pub(super) fn equal_ignoring_time_fields_v0(&self, other: &DataContract) -> bool { + self.id() == other.id() + && self.version() == other.version() + && self.owner_id() == other.owner_id() + && self.document_types() == other.document_types() + && self.config() == other.config() + && self.groups() == other.groups() + && self.tokens() == other.tokens() + } +} diff --git a/packages/rs-dpp/src/data_contract/methods/mod.rs b/packages/rs-dpp/src/data_contract/methods/mod.rs index 20b64863c06..279eafce441 100644 --- a/packages/rs-dpp/src/data_contract/methods/mod.rs +++ b/packages/rs-dpp/src/data_contract/methods/mod.rs @@ -1,3 +1,4 @@ +mod equal_ignoring_time_based_fields; pub mod schema; #[cfg(feature = "validation")] pub mod validate_document; diff --git a/packages/rs-dpp/src/data_contract/serialized_version/v1/mod.rs b/packages/rs-dpp/src/data_contract/serialized_version/v1/mod.rs index d24ed1402cd..12828ce8f53 100644 --- a/packages/rs-dpp/src/data_contract/serialized_version/v1/mod.rs +++ b/packages/rs-dpp/src/data_contract/serialized_version/v1/mod.rs @@ -2,6 +2,7 @@ use crate::data_contract::config::v0::DataContractConfigV0; use crate::data_contract::config::DataContractConfig; use crate::data_contract::document_type::accessors::DocumentTypeV0Getters; +use crate::block::epoch::EpochIndex; use crate::data_contract::associated_token::token_configuration::TokenConfiguration; use crate::data_contract::group::Group; use crate::data_contract::v0::DataContractV0; @@ -9,6 +10,8 @@ use crate::data_contract::v1::DataContractV1; use crate::data_contract::{ DataContract, DefinitionName, DocumentName, GroupContractPosition, TokenContractPosition, }; +use crate::identity::TimestampMillis; +use crate::prelude::BlockHeight; use bincode::{Decode, Encode}; use platform_value::{Identifier, Value}; use serde::{Deserialize, Serialize}; @@ -36,6 +39,19 @@ pub struct DataContractInSerializationFormatV1 { /// Document JSON Schemas per type pub document_schemas: BTreeMap, + /// The time 
in milliseconds that the contract was created. + pub created_at: Option, + /// The time in milliseconds that the contract was last updated. + pub updated_at: Option, + /// The block that the document was created. + pub created_at_block_height: Option, + /// The block that the contract was last updated + pub updated_at_block_height: Option, + /// The epoch at which the contract was created. + pub created_at_epoch: Option, + /// The epoch at which the contract was last updated. + pub updated_at_epoch: Option, + /// Groups that allow for specific multiparty actions on the contract #[serde(default, deserialize_with = "deserialize_u16_group_map")] pub groups: BTreeMap, @@ -100,6 +116,12 @@ impl From for DataContractInSerializationFormatV1 { .into_iter() .map(|(key, document_type)| (key, document_type.schema_owned())) .collect(), + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: Default::default(), } @@ -112,9 +134,14 @@ impl From for DataContractInSerializationFormatV1 { owner_id, schema_defs, document_types, + created_at, + updated_at, + created_at_block_height, + updated_at_block_height, + created_at_epoch, + updated_at_epoch, groups, tokens, - .. 
} = v1; DataContractInSerializationFormatV1 { @@ -127,6 +154,12 @@ impl From for DataContractInSerializationFormatV1 { .into_iter() .map(|(key, document_type)| (key, document_type.schema_owned())) .collect(), + created_at, + updated_at, + created_at_block_height, + updated_at_block_height, + created_at_epoch, + updated_at_epoch, groups, tokens, } diff --git a/packages/rs-dpp/src/data_contract/v0/accessors/mod.rs b/packages/rs-dpp/src/data_contract/v0/accessors/mod.rs index a8fd5cc9331..0332d7c6cf0 100644 --- a/packages/rs-dpp/src/data_contract/v0/accessors/mod.rs +++ b/packages/rs-dpp/src/data_contract/v0/accessors/mod.rs @@ -5,7 +5,6 @@ use crate::data_contract::errors::DataContractError; use crate::data_contract::v0::DataContractV0; use crate::data_contract::DocumentName; -use crate::metadata::Metadata; use crate::data_contract::document_type::accessors::DocumentTypeV0Getters; use platform_value::Identifier; @@ -90,14 +89,6 @@ impl DataContractV0Getters for DataContractV0 { &mut self.document_types } - fn metadata(&self) -> Option<&Metadata> { - self.metadata.as_ref() - } - - fn metadata_mut(&mut self) -> Option<&mut Metadata> { - self.metadata.as_mut() - } - fn config(&self) -> &DataContractConfig { &self.config } @@ -130,10 +121,6 @@ impl DataContractV0Setters for DataContractV0 { self.owner_id = owner_id; } - fn set_metadata(&mut self, metadata: Option) { - self.metadata = metadata; - } - fn set_config(&mut self, config: DataContractConfig) { self.config = config; } diff --git a/packages/rs-dpp/src/data_contract/v1/accessors/mod.rs b/packages/rs-dpp/src/data_contract/v1/accessors/mod.rs index d62a46ddfd0..4d6260a8ad7 100644 --- a/packages/rs-dpp/src/data_contract/v1/accessors/mod.rs +++ b/packages/rs-dpp/src/data_contract/v1/accessors/mod.rs @@ -5,12 +5,14 @@ use crate::data_contract::errors::DataContractError; use crate::data_contract::v1::DataContractV1; use crate::data_contract::{DocumentName, GroupContractPosition, TokenContractPosition}; -use 
crate::metadata::Metadata; +use crate::block::epoch::EpochIndex; use crate::data_contract::accessors::v1::{DataContractV1Getters, DataContractV1Setters}; use crate::data_contract::associated_token::token_configuration::TokenConfiguration; use crate::data_contract::document_type::accessors::DocumentTypeV0Getters; use crate::data_contract::group::Group; +use crate::identity::TimestampMillis; +use crate::prelude::BlockHeight; use crate::tokens::calculate_token_id; use crate::tokens::errors::TokenError; use crate::ProtocolError; @@ -96,14 +98,6 @@ impl DataContractV0Getters for DataContractV1 { &mut self.document_types } - fn metadata(&self) -> Option<&Metadata> { - self.metadata.as_ref() - } - - fn metadata_mut(&mut self) -> Option<&mut Metadata> { - self.metadata.as_mut() - } - fn config(&self) -> &DataContractConfig { &self.config } @@ -136,10 +130,6 @@ impl DataContractV0Setters for DataContractV1 { self.owner_id = owner_id; } - fn set_metadata(&mut self, metadata: Option) { - self.metadata = metadata; - } - fn set_config(&mut self, config: DataContractConfig) { self.config = config; } @@ -204,6 +194,36 @@ impl DataContractV1Getters for DataContractV1 { .get(&position) .map(|_| calculate_token_id(self.id.as_bytes(), position).into()) } + + /// Returns the timestamp in milliseconds when the contract was created. + fn created_at(&self) -> Option { + self.created_at + } + + /// Returns the timestamp in milliseconds when the contract was last updated. + fn updated_at(&self) -> Option { + self.updated_at + } + + /// Returns the block height at which the contract was created. + fn created_at_block_height(&self) -> Option { + self.created_at_block_height + } + + /// Returns the block height at which the contract was last updated. + fn updated_at_block_height(&self) -> Option { + self.updated_at_block_height + } + + /// Returns the epoch at which the contract was created. 
+ fn created_at_epoch(&self) -> Option { + self.created_at_epoch + } + + /// Returns the epoch at which the contract was last updated. + fn updated_at_epoch(&self) -> Option { + self.updated_at_epoch + } } impl DataContractV1Setters for DataContractV1 { @@ -222,4 +242,34 @@ impl DataContractV1Setters for DataContractV1 { fn add_token(&mut self, name: TokenContractPosition, token: TokenConfiguration) { self.tokens.insert(name, token); } + + /// Sets the timestamp in milliseconds when the contract was created. + fn set_created_at(&mut self, created_at: Option) { + self.created_at = created_at; + } + + /// Sets the timestamp in milliseconds when the contract was last updated. + fn set_updated_at(&mut self, updated_at: Option) { + self.updated_at = updated_at; + } + + /// Sets the block height at which the contract was created. + fn set_created_at_block_height(&mut self, block_height: Option) { + self.created_at_block_height = block_height; + } + + /// Sets the block height at which the contract was last updated. + fn set_updated_at_block_height(&mut self, block_height: Option) { + self.updated_at_block_height = block_height; + } + + /// Sets the epoch at which the contract was created. + fn set_created_at_epoch(&mut self, epoch: Option) { + self.created_at_epoch = epoch; + } + + /// Sets the epoch at which the contract was last updated. 
+ fn set_updated_at_epoch(&mut self, epoch: Option) { + self.updated_at_epoch = epoch; + } } diff --git a/packages/rs-dpp/src/data_contract/v1/data_contract.rs b/packages/rs-dpp/src/data_contract/v1/data_contract.rs index 60a6527fd07..104e6334d94 100644 --- a/packages/rs-dpp/src/data_contract/v1/data_contract.rs +++ b/packages/rs-dpp/src/data_contract/v1/data_contract.rs @@ -1,8 +1,6 @@ use std::collections::BTreeMap; -use platform_value::Identifier; -use platform_value::Value; - +use crate::block::epoch::EpochIndex; use crate::data_contract::associated_token::token_configuration::TokenConfiguration; use crate::data_contract::config::DataContractConfig; use crate::data_contract::document_type::DocumentType; @@ -10,7 +8,10 @@ use crate::data_contract::group::Group; use crate::data_contract::{ DefinitionName, DocumentName, GroupContractPosition, TokenContractPosition, }; -use crate::metadata::Metadata; +use crate::identity::TimestampMillis; +use crate::prelude::BlockHeight; +use platform_value::Identifier; +use platform_value::Value; /// `DataContractV1` represents a data contract in a decentralized platform. /// @@ -23,25 +24,45 @@ use crate::metadata::Metadata; /// /// # Changes from `DataContractV0` to `DataContractV1` /// -/// In `DataContractV1`, two significant features were introduced to enhance contract governance -/// and support token-related operations: +/// In `DataContractV1`, several enhancements were introduced to improve contract governance, +/// support token-related operations, and enhance auditability and traceability of contract updates. /// -/// 1. **Groups** (`groups: BTreeMap`) -/// - Groups allow for specific multiparty actions on the contract. Each group is defined with a -/// set of members (`Identifier`) and their corresponding member power (`u32`). -/// - Groups facilitate fine-grained access control and decision-making processes by enabling -/// required power thresholds for group actions. 
-/// - This is particularly useful for contracts where multiple parties are involved in controlling -/// or managing contract-specific features. +/// ## 1. **Groups** (`groups: BTreeMap`) +/// - Groups allow for specific multiparty actions on the contract. Each group is defined with a +/// set of members (`Identifier`) and their corresponding member power (`u32`). +/// - Groups facilitate fine-grained access control and decision-making processes by enabling +/// required power thresholds for group actions. +/// - This is particularly useful for contracts where multiple parties are involved in controlling +/// or managing contract-specific features. /// -/// 2. **Tokens** (`tokens: BTreeMap`) -/// - Tokens introduce configurable token-related functionality within the contract, such as -/// base supply, maximum supply, and manual minting/burning rules. -/// - Token configurations include change control rules, ensuring proper governance for -/// modifying supply limits and token-related settings. -/// - This addition enables contracts to define and manage tokens while ensuring compliance -/// with governance rules (e.g., who can mint or burn tokens). +/// ## 2. **Tokens** (`tokens: BTreeMap`) +/// - Tokens introduce configurable token-related functionality within the contract, such as +/// base supply, maximum supply, and manual minting/burning rules. +/// - Token configurations include change control rules, ensuring proper governance for +/// modifying supply limits and token-related settings. +/// - This addition enables contracts to define and manage tokens while ensuring compliance +/// with governance rules (e.g., who can mint or burn tokens). /// +/// ## 3. **Timestamps and Block Height Tracking** +/// To improve traceability and accountability of contract creation and modifications, four +/// new fields were added: +/// +/// - **`created_at`** (`Option`) +/// - Stores the timestamp (in milliseconds) when the contract was originally created. 
+/// - This provides an immutable record of when the contract came into existence. +/// - **`updated_at`** (`Option`) +/// - Stores the timestamp of the most recent update to the contract. +/// - This helps in tracking contract modifications over time. +/// - **`created_at_block_height`** (`Option`) +/// - Captures the block height at which the contract was created. +/// - This provides an on-chain reference for the state of the contract at creation. +/// - **`updated_at_block_height`** (`Option`) +/// - Captures the block height of the last contract update. +/// - Useful for historical analysis, rollback mechanisms, and ensuring changes are anchored +/// to specific blockchain states. +/// +/// These additions ensure that data contracts are not only more flexible and governed but also +/// fully auditable in terms of when and how they evolve over time. #[derive(Debug, Clone, PartialEq)] pub struct DataContractV1 { /// A unique identifier for the data contract. @@ -57,16 +78,25 @@ pub struct DataContractV1 { /// A mapping of document names to their corresponding document types. pub document_types: BTreeMap, - // TODO: Move metadata from here - /// Optional metadata associated with the contract. - pub metadata: Option, - /// Internal configuration for the contract. pub config: DataContractConfig, /// Shared subschemas to reuse across documents (see $defs) pub schema_defs: Option>, + /// The time in milliseconds that the contract was created. + pub created_at: Option, + /// The time in milliseconds that the contract was last updated. + pub updated_at: Option, + /// The block that the document was created. + pub created_at_block_height: Option, + /// The block that the contract was last updated + pub updated_at_block_height: Option, + /// The epoch at which the contract was created. + pub created_at_epoch: Option, + /// The epoch at which the contract was last updated. 
+ pub updated_at_epoch: Option, + /// Groups that allow for specific multiparty actions on the contract pub groups: BTreeMap, diff --git a/packages/rs-dpp/src/data_contract/v1/serialization/mod.rs b/packages/rs-dpp/src/data_contract/v1/serialization/mod.rs index 87825f6c692..006906381fb 100644 --- a/packages/rs-dpp/src/data_contract/v1/serialization/mod.rs +++ b/packages/rs-dpp/src/data_contract/v1/serialization/mod.rs @@ -104,9 +104,14 @@ impl DataContractV1 { version, owner_id, document_types, - metadata: None, config, schema_defs, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: Default::default(), }; @@ -127,6 +132,12 @@ impl DataContractV1 { owner_id, document_schemas, schema_defs, + created_at, + updated_at, + created_at_block_height, + updated_at_block_height, + created_at_epoch, + updated_at_epoch, groups, tokens, } = data_contract_data; @@ -149,9 +160,14 @@ impl DataContractV1 { version, owner_id, document_types, - metadata: None, config, schema_defs, + created_at, + updated_at, + created_at_block_height, + updated_at_block_height, + created_at_epoch, + updated_at_epoch, groups, tokens, }; diff --git a/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs b/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs index 552a96a9490..70a4f1fc737 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs +++ b/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs @@ -18,6 +18,10 @@ use crate::consensus::basic::data_contract::{ InvalidDataContractIdError, InvalidDataContractVersionError, InvalidDocumentTypeNameError, InvalidDocumentTypeRequiredSecurityLevelError, InvalidIndexPropertyTypeError, InvalidIndexedPropertyConstraintError, InvalidTokenBaseSupplyError, + InvalidTokenDistributionFunctionDivideByZeroError, + InvalidTokenDistributionFunctionIncoherenceError, + 
InvalidTokenDistributionFunctionInvalidParameterError, + InvalidTokenDistributionFunctionInvalidParameterTupleError, NonContiguousContractGroupPositionsError, NonContiguousContractTokenPositionsError, SystemPropertyIndexAlreadyPresentError, UndefinedIndexPropertyError, UniqueIndicesLimitReachedError, UnknownDocumentCreationRestrictionModeError, @@ -73,7 +77,8 @@ use crate::consensus::basic::group::GroupActionNotAllowedOnTransitionError; use crate::consensus::basic::overflow_error::OverflowError; use crate::consensus::basic::token::{ ChoosingTokenMintRecipientNotAllowedError, ContractHasNoTokensError, - DestinationIdentityForTokenMintingNotSetError, InvalidActionIdError, InvalidTokenIdError, + DestinationIdentityForTokenMintingNotSetError, InvalidActionIdError, InvalidTokenAmountError, + InvalidTokenConfigUpdateNoChangeError, InvalidTokenIdError, InvalidTokenNoteTooBigError, InvalidTokenPositionError, TokenTransferToOurselfError, }; use crate::consensus::basic::unsupported_version_error::UnsupportedVersionError; @@ -429,12 +434,41 @@ pub enum BasicError { #[error(transparent)] InvalidTokenIdError(InvalidTokenIdError), + #[error(transparent)] + InvalidTokenAmountError(InvalidTokenAmountError), + #[error(transparent)] InvalidTokenPositionError(InvalidTokenPositionError), + #[error(transparent)] + InvalidTokenConfigUpdateNoChangeError(InvalidTokenConfigUpdateNoChangeError), + + #[error(transparent)] + InvalidTokenDistributionFunctionDivideByZeroError( + InvalidTokenDistributionFunctionDivideByZeroError, + ), + + #[error(transparent)] + InvalidTokenDistributionFunctionInvalidParameterError( + InvalidTokenDistributionFunctionInvalidParameterError, + ), + + #[error(transparent)] + InvalidTokenDistributionFunctionInvalidParameterTupleError( + InvalidTokenDistributionFunctionInvalidParameterTupleError, + ), + + #[error(transparent)] + InvalidTokenDistributionFunctionIncoherenceError( + InvalidTokenDistributionFunctionIncoherenceError, + ), + #[error(transparent)] 
TokenTransferToOurselfError(TokenTransferToOurselfError), + #[error(transparent)] + InvalidTokenNoteTooBigError(InvalidTokenNoteTooBigError), + #[error(transparent)] ContractHasNoTokensError(ContractHasNoTokensError), diff --git a/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_divide_by_zero_error.rs b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_divide_by_zero_error.rs new file mode 100644 index 00000000000..0d61950a806 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_divide_by_zero_error.rs @@ -0,0 +1,44 @@ +use crate::consensus::basic::BasicError; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; +use crate::errors::ProtocolError; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +use crate::consensus::ConsensusError; + +use bincode::{Decode, Encode}; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error( + "Invalid token distribution function: division by zero in {}", + distribution_function +)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenDistributionFunctionDivideByZeroError { + /* + + DO NOT CHANGE ORDER OF FIELDS WITHOUT INTRODUCING A NEW VERSION + + */ + distribution_function: DistributionFunction, +} + +impl InvalidTokenDistributionFunctionDivideByZeroError { + pub fn new(distribution_function: DistributionFunction) -> Self { + Self { + distribution_function, + } + } + + pub fn distribution_function(&self) -> &DistributionFunction { + &self.distribution_function + } +} + +impl From for ConsensusError { + fn from(err: InvalidTokenDistributionFunctionDivideByZeroError) -> Self { + Self::BasicError(BasicError::InvalidTokenDistributionFunctionDivideByZeroError(err)) + } +} diff --git 
a/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_incoherence_error.rs b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_incoherence_error.rs new file mode 100644 index 00000000000..da87ea8a346 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_incoherence_error.rs @@ -0,0 +1,38 @@ +use crate::consensus::basic::BasicError; +use crate::errors::ProtocolError; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +use crate::consensus::ConsensusError; + +use bincode::{Decode, Encode}; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error("Incoherent parameters in token distribution function: {}.", message)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenDistributionFunctionIncoherenceError { + /* + + DO NOT CHANGE ORDER OF FIELDS WITHOUT INTRODUCING A NEW VERSION + + */ + message: String, +} + +impl InvalidTokenDistributionFunctionIncoherenceError { + pub fn new(message: String) -> Self { + Self { message } + } + + pub fn message(&self) -> &str { + &self.message + } +} + +impl From for ConsensusError { + fn from(err: InvalidTokenDistributionFunctionIncoherenceError) -> Self { + Self::BasicError(BasicError::InvalidTokenDistributionFunctionIncoherenceError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_invalid_parameter_error.rs b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_invalid_parameter_error.rs new file mode 100644 index 00000000000..22d269abe16 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_invalid_parameter_error.rs @@ -0,0 +1,68 @@ +use crate::consensus::basic::BasicError; +use 
crate::errors::ProtocolError; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +use crate::consensus::ConsensusError; + +use bincode::{Decode, Encode}; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error( + "Invalid parameter `{}` in token distribution function. Expected range: {} to {}{}", + parameter, + min, + max, + if let Some(not_valid) = not_valid { + format!(" except {} (which we got)", not_valid) + } else { + "".to_string() + } +)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenDistributionFunctionInvalidParameterError { + /* + + DO NOT CHANGE ORDER OF FIELDS WITHOUT INTRODUCING A NEW VERSION + + */ + parameter: String, + min: i64, + max: i64, + not_valid: Option, +} + +impl InvalidTokenDistributionFunctionInvalidParameterError { + pub fn new(parameter: String, min: i64, max: i64, not_valid: Option) -> Self { + Self { + parameter, + min, + max, + not_valid, + } + } + + pub fn parameter(&self) -> &str { + &self.parameter + } + + pub fn min(&self) -> i64 { + self.min + } + + pub fn max(&self) -> i64 { + self.max + } + + pub fn not_valid(&self) -> Option { + self.not_valid + } +} + +impl From for ConsensusError { + fn from(err: InvalidTokenDistributionFunctionInvalidParameterError) -> Self { + Self::BasicError(BasicError::InvalidTokenDistributionFunctionInvalidParameterError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_invalid_parameter_tuple_error.rs b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_invalid_parameter_tuple_error.rs new file mode 100644 index 00000000000..39d0d444a0c --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/data_contract/invalid_token_distribution_function_invalid_parameter_tuple_error.rs @@ -0,0 +1,59 @@ +use crate::consensus::basic::BasicError; +use 
crate::errors::ProtocolError; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +use crate::consensus::ConsensusError; + +use bincode::{Decode, Encode}; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error( + "Invalid parameter tuple in token distribution function: `{}` must be {} `{}`", + first_parameter, + relation, + second_parameter +)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenDistributionFunctionInvalidParameterTupleError { + /* + + DO NOT CHANGE ORDER OF FIELDS WITHOUT INTRODUCING A NEW VERSION + + */ + first_parameter: String, + second_parameter: String, + relation: String, // "greater than" or "smaller than" +} + +impl InvalidTokenDistributionFunctionInvalidParameterTupleError { + pub fn new(first_parameter: String, second_parameter: String, relation: String) -> Self { + Self { + first_parameter, + second_parameter, + relation, + } + } + + pub fn first_parameter(&self) -> &str { + &self.first_parameter + } + + pub fn second_parameter(&self) -> &str { + &self.second_parameter + } + + pub fn relation(&self) -> &str { + &self.relation + } +} + +impl From for ConsensusError { + fn from(err: InvalidTokenDistributionFunctionInvalidParameterTupleError) -> Self { + Self::BasicError( + BasicError::InvalidTokenDistributionFunctionInvalidParameterTupleError(err), + ) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/data_contract/mod.rs b/packages/rs-dpp/src/errors/consensus/basic/data_contract/mod.rs index 529edda651b..cf505b55967 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/data_contract/mod.rs +++ b/packages/rs-dpp/src/errors/consensus/basic/data_contract/mod.rs @@ -28,6 +28,10 @@ mod invalid_indexed_property_constraint_error; #[cfg(feature = "json-schema-validation")] mod invalid_json_schema_ref_error; mod invalid_token_base_supply_error; +mod invalid_token_distribution_function_divide_by_zero_error; 
+mod invalid_token_distribution_function_incoherence_error; +mod invalid_token_distribution_function_invalid_parameter_error; +mod invalid_token_distribution_function_invalid_parameter_tuple_error; mod non_contiguous_contract_group_positions_error; mod non_contiguous_contract_token_positions_error; mod system_property_index_already_present_error; @@ -73,6 +77,10 @@ pub use group_total_power_has_less_than_required_power_error::*; pub use incompatible_document_type_schema_error::*; pub use invalid_document_type_name_error::*; pub use invalid_token_base_supply_error::*; +pub use invalid_token_distribution_function_divide_by_zero_error::*; +pub use invalid_token_distribution_function_incoherence_error::*; +pub use invalid_token_distribution_function_invalid_parameter_error::*; +pub use invalid_token_distribution_function_invalid_parameter_tuple_error::*; pub use non_contiguous_contract_group_positions_error::*; pub use non_contiguous_contract_token_positions_error::*; pub use unknown_document_creation_restriction_mode_error::*; diff --git a/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_amount_error.rs b/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_amount_error.rs new file mode 100644 index 00000000000..8f7ecc204f2 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_amount_error.rs @@ -0,0 +1,46 @@ +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::ProtocolError; +use bincode::{Decode, Encode}; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error( + "Invalid token amount {}, exceeds maximum allowed {}", + token_amount, + max_token_amount +)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenAmountError { + max_token_amount: u64, + token_amount: u64, +} + +impl InvalidTokenAmountError { + 
/// Creates a new `InvalidTokenAmountError`. + pub fn new(max_token_amount: u64, token_amount: u64) -> Self { + Self { + max_token_amount, + token_amount, + } + } + + /// Returns the maximum allowed token amount. + pub fn max_token_amount(&self) -> u64 { + self.max_token_amount + } + + /// Returns the invalid token amount that was provided. + pub fn token_amount(&self) -> u64 { + self.token_amount + } +} + +impl From<InvalidTokenAmountError> for ConsensusError { + fn from(err: InvalidTokenAmountError) -> Self { + Self::BasicError(BasicError::InvalidTokenAmountError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_config_update_no_change_error.rs b/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_config_update_no_change_error.rs new file mode 100644 index 00000000000..a588d9ff0e0 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_config_update_no_change_error.rs @@ -0,0 +1,26 @@ +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::ProtocolError; +use bincode::{Decode, Encode}; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error("Invalid token configuration update: no changes were made")] +#[platform_serialize(unversioned)] +pub struct InvalidTokenConfigUpdateNoChangeError; + +impl InvalidTokenConfigUpdateNoChangeError { + /// Creates a new `InvalidTokenConfigUpdateNoChangeError`. 
+ pub fn new() -> Self { + Self + } +} + +impl From<InvalidTokenConfigUpdateNoChangeError> for ConsensusError { + fn from(err: InvalidTokenConfigUpdateNoChangeError) -> Self { + Self::BasicError(BasicError::InvalidTokenConfigUpdateNoChangeError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_note_too_big_error.rs b/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_note_too_big_error.rs new file mode 100644 index 00000000000..30e289021c8 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/token/invalid_token_note_too_big_error.rs @@ -0,0 +1,54 @@ +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::ProtocolError; +use bincode::{Decode, Encode}; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error( + "Invalid token note: '{}' is too long ({} bytes), max allowed is {} bytes", + note_type, + note_length, + max_note_length +)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenNoteTooBigError { + max_note_length: u32, + note_type: String, + note_length: u32, +} + +impl InvalidTokenNoteTooBigError { + /// Creates a new `InvalidTokenNoteTooBigError`. + pub fn new(max_note_length: u32, note_type: &str, note_length: u32) -> Self { + Self { + max_note_length, + note_type: note_type.to_string(), + note_length, + } + } + + /// Returns the maximum allowed note length. + pub fn max_note_length(&self) -> u32 { + self.max_note_length + } + + /// Returns the type of note that exceeded the allowed length. + pub fn note_type(&self) -> &str { + &self.note_type + } + + /// Returns the actual note length that was too large. 
+ pub fn note_length(&self) -> u32 { + self.note_length + } +} + +impl From for ConsensusError { + fn from(err: InvalidTokenNoteTooBigError) -> Self { + Self::BasicError(BasicError::InvalidTokenNoteTooBigError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/token/mod.rs b/packages/rs-dpp/src/errors/consensus/basic/token/mod.rs index d77dfff6fab..971cd92a4b4 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/token/mod.rs +++ b/packages/rs-dpp/src/errors/consensus/basic/token/mod.rs @@ -2,7 +2,10 @@ mod choosing_token_mint_recipient_not_allowed_error; mod contract_has_no_tokens_error; mod destination_identity_for_token_minting_not_set_error; mod invalid_action_id_error; +mod invalid_token_amount_error; +mod invalid_token_config_update_no_change_error; mod invalid_token_id_error; +mod invalid_token_note_too_big_error; mod invalid_token_position_error; mod token_transfer_to_ourselves_error; @@ -10,6 +13,9 @@ pub use choosing_token_mint_recipient_not_allowed_error::*; pub use contract_has_no_tokens_error::*; pub use destination_identity_for_token_minting_not_set_error::*; pub use invalid_action_id_error::*; +pub use invalid_token_amount_error::*; +pub use invalid_token_config_update_no_change_error::*; pub use invalid_token_id_error::*; +pub use invalid_token_note_too_big_error::*; pub use invalid_token_position_error::*; pub use token_transfer_to_ourselves_error::*; diff --git a/packages/rs-dpp/src/errors/consensus/codes.rs b/packages/rs-dpp/src/errors/consensus/codes.rs index 67177be3766..62066426094 100644 --- a/packages/rs-dpp/src/errors/consensus/codes.rs +++ b/packages/rs-dpp/src/errors/consensus/codes.rs @@ -102,6 +102,10 @@ impl ErrorWithCode for BasicError { Self::InvalidTokenBaseSupplyError(_) => 10251, Self::NonContiguousContractGroupPositionsError(_) => 10252, Self::NonContiguousContractTokenPositionsError(_) => 10253, + Self::InvalidTokenDistributionFunctionDivideByZeroError(_) => 10254, + 
Self::InvalidTokenDistributionFunctionInvalidParameterError(_) => 10255, + Self::InvalidTokenDistributionFunctionInvalidParameterTupleError(_) => 10256, + Self::InvalidTokenDistributionFunctionIncoherenceError(_) => 10257, // Group Errors: 10350-10399 Self::GroupPositionDoesNotExistError(_) => 10350, @@ -141,6 +145,9 @@ impl ErrorWithCode for BasicError { Self::DestinationIdentityForTokenMintingNotSetError(_) => 10454, Self::ChoosingTokenMintRecipientNotAllowedError(_) => 10455, Self::TokenTransferToOurselfError(_) => 10456, + Self::InvalidTokenConfigUpdateNoChangeError(_) => 10457, + Self::InvalidTokenAmountError(_) => 10458, + Self::InvalidTokenNoteTooBigError(_) => 10459, // Identity Errors: 10500-10599 Self::DuplicatedIdentityPublicKeyBasicError(_) => 10500, @@ -258,6 +265,7 @@ impl ErrorWithCode for StateError { Self::IdentityTokenAccountAlreadyFrozenError(_) => 40162, Self::TokenAlreadyPausedError(_) => 40163, Self::TokenNotPausedError(_) => 40164, + Self::InvalidTokenClaimPropertyMismatch(_) => 40165, // Identity Errors: 40200-40299 Self::IdentityAlreadyExistsError(_) => 40200, diff --git a/packages/rs-dpp/src/errors/consensus/state/state_error.rs b/packages/rs-dpp/src/errors/consensus/state/state_error.rs index 9e5a73ae66c..168d57bea5e 100644 --- a/packages/rs-dpp/src/errors/consensus/state/state_error.rs +++ b/packages/rs-dpp/src/errors/consensus/state/state_error.rs @@ -42,9 +42,7 @@ use crate::consensus::state::identity::missing_transfer_key_error::MissingTransf use crate::consensus::state::identity::no_transfer_key_for_core_withdrawal_available_error::NoTransferKeyForCoreWithdrawalAvailableError; use crate::consensus::state::prefunded_specialized_balances::prefunded_specialized_balance_insufficient_error::PrefundedSpecializedBalanceInsufficientError; use crate::consensus::state::prefunded_specialized_balances::prefunded_specialized_balance_not_found_error::PrefundedSpecializedBalanceNotFoundError; -use
crate::consensus::state::token::{IdentityDoesNotHaveEnoughTokenBalanceError, IdentityTokenAccountFrozenError, IdentityTokenAccountNotFrozenError, InvalidGroupPositionError, NewAuthorizedActionTakerGroupDoesNotExistError, NewAuthorizedActionTakerIdentityDoesNotExistError, NewAuthorizedActionTakerMainGroupNotSetError, NewTokensDestinationIdentityDoesNotExistError, TokenMintPastMaxSupplyError, TokenSettingMaxSupplyToLessThanCurrentSupplyError, UnauthorizedTokenActionError, IdentityTokenAccountAlreadyFrozenError, TokenAlreadyPausedError, TokenIsPausedError, - TokenNotPausedError, -}; +use crate::consensus::state::token::{IdentityDoesNotHaveEnoughTokenBalanceError, IdentityTokenAccountFrozenError, IdentityTokenAccountNotFrozenError, InvalidGroupPositionError, NewAuthorizedActionTakerGroupDoesNotExistError, NewAuthorizedActionTakerIdentityDoesNotExistError, NewAuthorizedActionTakerMainGroupNotSetError, NewTokensDestinationIdentityDoesNotExistError, TokenMintPastMaxSupplyError, TokenSettingMaxSupplyToLessThanCurrentSupplyError, UnauthorizedTokenActionError, IdentityTokenAccountAlreadyFrozenError, TokenAlreadyPausedError, TokenIsPausedError, TokenNotPausedError, InvalidTokenClaimPropertyMismatch}; use crate::consensus::state::voting::masternode_incorrect_voter_identity_id_error::MasternodeIncorrectVoterIdentityIdError; use crate::consensus::state::voting::masternode_incorrect_voting_address_error::MasternodeIncorrectVotingAddressError; use crate::consensus::state::voting::masternode_not_found_error::MasternodeNotFoundError; @@ -241,6 +239,9 @@ pub enum StateError { #[error(transparent)] TokenMintPastMaxSupplyError(TokenMintPastMaxSupplyError), + #[error(transparent)] + InvalidTokenClaimPropertyMismatch(InvalidTokenClaimPropertyMismatch), + #[error(transparent)] NewTokensDestinationIdentityDoesNotExistError(NewTokensDestinationIdentityDoesNotExistError), diff --git a/packages/rs-dpp/src/errors/consensus/state/token/invalid_token_claim_property_mismatch.rs 
b/packages/rs-dpp/src/errors/consensus/state/token/invalid_token_claim_property_mismatch.rs new file mode 100644 index 00000000000..f6261f1e549 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/state/token/invalid_token_claim_property_mismatch.rs @@ -0,0 +1,47 @@ +use crate::consensus::state::state_error::StateError; +use crate::consensus::ConsensusError; +use crate::prelude::Identifier; +use crate::ProtocolError; +use bincode::{Decode, Encode}; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error( + "Invalid token release property mismatch for '{}', token ID: {}", + property, + token_id +)] +#[platform_serialize(unversioned)] +pub struct InvalidTokenClaimPropertyMismatch { + property: String, + token_id: Identifier, +} + +impl InvalidTokenClaimPropertyMismatch { + /// Creates a new `InvalidTokenClaimPropertyMismatch` error. + pub fn new(property: impl Into, token_id: Identifier) -> Self { + Self { + property: property.into(), + token_id, + } + } + + /// Returns the property name that caused the mismatch. + pub fn property(&self) -> &str { + &self.property + } + + /// Returns the token ID associated with the mismatch. 
+ pub fn token_id(&self) -> Identifier { + self.token_id + } +} + +impl From for ConsensusError { + fn from(err: InvalidTokenClaimPropertyMismatch) -> Self { + Self::StateError(StateError::InvalidTokenClaimPropertyMismatch(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/state/token/mod.rs b/packages/rs-dpp/src/errors/consensus/state/token/mod.rs index fe7b71a5329..bf66a2ae1f4 100644 --- a/packages/rs-dpp/src/errors/consensus/state/token/mod.rs +++ b/packages/rs-dpp/src/errors/consensus/state/token/mod.rs @@ -3,6 +3,7 @@ mod identity_token_account_already_frozen_error; mod identity_token_account_frozen_error; mod identity_token_account_not_frozen_error; mod invalid_group_position_error; +mod invalid_token_claim_property_mismatch; mod new_authorized_action_taker_group_does_not_exist_error; mod new_authorized_action_taker_identity_does_not_exist_error; mod new_authorized_action_taker_main_group_not_set_error; @@ -19,6 +20,7 @@ pub use identity_token_account_already_frozen_error::*; pub use identity_token_account_frozen_error::*; pub use identity_token_account_not_frozen_error::*; pub use invalid_group_position_error::*; +pub use invalid_token_claim_property_mismatch::*; pub use new_authorized_action_taker_group_does_not_exist_error::*; pub use new_authorized_action_taker_identity_does_not_exist_error::*; pub use new_authorized_action_taker_main_group_not_set_error::*; diff --git a/packages/rs-dpp/src/errors/protocol_error.rs b/packages/rs-dpp/src/errors/protocol_error.rs index 7c5135a005c..da040d1e69b 100644 --- a/packages/rs-dpp/src/errors/protocol_error.rs +++ b/packages/rs-dpp/src/errors/protocol_error.rs @@ -204,6 +204,9 @@ pub enum ProtocolError { #[error("overflow error: {0}")] Overflow(&'static str), + #[error("divide by zero error: {0}")] + DivideByZero(&'static str), + /// Error #[error("missing key: {0}")] DesiredKeyWithTypePurposeSecurityLevelMissing(String), @@ -271,6 +274,13 @@ pub enum ProtocolError { #[error("Signature wrong size: 
expected 96, got {got}")] BlsSignatureSizeError { got: u32 }, + + /// Error when trying to add two different types of `RewardDistributionMoment`. + #[error("Attempted to add incompatible types of RewardDistributionMoment: {0}")] + AddingDifferentTypes(String), + + #[error("invalid distribution step error: {0}")] + InvalidDistributionStep(&'static str), } impl From<&str> for ProtocolError { diff --git a/packages/rs-dpp/src/lib.rs b/packages/rs-dpp/src/lib.rs index adde634d896..66599277405 100644 --- a/packages/rs-dpp/src/lib.rs +++ b/packages/rs-dpp/src/lib.rs @@ -82,6 +82,8 @@ pub mod prelude { pub type BlockHeight = u64; + pub type FeeMultiplier = u64; + pub type BlockHeightInterval = u64; pub type CoreBlockHeight = u32; diff --git a/packages/rs-dpp/src/state_transition/mod.rs b/packages/rs-dpp/src/state_transition/mod.rs index 88a3eddcfe2..782c970aa04 100644 --- a/packages/rs-dpp/src/state_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/mod.rs @@ -361,6 +361,7 @@ impl StateTransition { BatchedTransitionRef::Token(TokenTransition::ConfigUpdate(_)) => { "TokenConfigUpdate" } + BatchedTransitionRef::Token(TokenTransition::Claim(_)) => "TokenClaim", }; document_transition_types.push(document_transition_name); } diff --git a/packages/rs-dpp/src/state_transition/proof_result.rs b/packages/rs-dpp/src/state_transition/proof_result.rs index b20938b49de..33e6e2be0ce 100644 --- a/packages/rs-dpp/src/state_transition/proof_result.rs +++ b/packages/rs-dpp/src/state_transition/proof_result.rs @@ -22,4 +22,5 @@ pub enum StateTransitionProofResult { VerifiedDocuments(BTreeMap>), VerifiedTokenActionWithDocument(Document), VerifiedMasternodeVote(Vote), + VerifiedNextDistribution(Vote), } diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/document_transition.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/document_transition.rs index 
85abbe5c682..ed63c8524a7 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/document_transition.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/document_transition.rs @@ -5,7 +5,7 @@ use derive_more::{Display, From}; use serde::{Deserialize, Serialize}; use bincode::{Encode, Decode}; use crate::prelude::{IdentityNonce, Revision}; -use crate::state_transition::batch_transition::{DocumentCreateTransition, DocumentDeleteTransition, DocumentReplaceTransition, TokenBurnTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, TokenMintTransition, TokenTransferTransition, TokenUnfreezeTransition}; +use crate::state_transition::batch_transition::{DocumentCreateTransition, DocumentDeleteTransition, DocumentReplaceTransition, TokenBurnTransition, TokenConfigUpdateTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, TokenMintTransition, TokenClaimTransition, TokenTransferTransition, TokenUnfreezeTransition}; use crate::state_transition::batch_transition::batched_transition::{DocumentPurchaseTransition, DocumentTransferTransition, DocumentUpdatePriceTransition}; use crate::state_transition::batch_transition::batched_transition::document_purchase_transition::v0::v0_methods::DocumentPurchaseTransitionV0Methods; use crate::state_transition::batch_transition::batched_transition::document_transfer_transition::v0::v0_methods::DocumentTransferTransitionV0Methods; @@ -108,9 +108,17 @@ impl BatchTransitionResolversV0 for DocumentTransition { None } + fn as_transition_token_claim(&self) -> Option<&TokenClaimTransition> { + None + } + fn as_transition_token_emergency_action(&self) -> Option<&TokenEmergencyActionTransition> { None } + + fn as_transition_token_config_update(&self) -> Option<&TokenConfigUpdateTransition> { + None + } } pub trait DocumentTransitionV0Methods { 
diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/mod.rs index 39d0c5e2ff3..3ccbc5fb916 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/mod.rs @@ -16,6 +16,7 @@ pub mod multi_party_action; mod resolvers; pub mod token_base_transition; pub mod token_burn_transition; +pub mod token_claim_transition; pub mod token_config_update_transition; pub mod token_destroy_frozen_funds_transition; pub mod token_emergency_action_transition; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/resolvers.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/resolvers.rs index bf1d555544f..572056411ff 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/resolvers.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/resolvers.rs @@ -4,8 +4,9 @@ use crate::state_transition::batch_transition::batched_transition::{ use crate::state_transition::batch_transition::resolvers::v0::BatchTransitionResolversV0; use crate::state_transition::batch_transition::{ DocumentCreateTransition, DocumentDeleteTransition, DocumentReplaceTransition, - TokenBurnTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, - TokenFreezeTransition, TokenMintTransition, TokenTransferTransition, TokenUnfreezeTransition, + TokenBurnTransition, TokenClaimTransition, TokenConfigUpdateTransition, + TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, + TokenMintTransition, TokenTransferTransition, 
TokenUnfreezeTransition, }; impl BatchTransitionResolversV0 for BatchedTransition { @@ -88,12 +89,26 @@ impl BatchTransitionResolversV0 for BatchedTransition { } } + fn as_transition_token_claim(&self) -> Option<&TokenClaimTransition> { + match self { + BatchedTransition::Document(_) => None, + BatchedTransition::Token(token) => token.as_transition_token_claim(), + } + } + fn as_transition_token_emergency_action(&self) -> Option<&TokenEmergencyActionTransition> { match self { BatchedTransition::Document(_) => None, BatchedTransition::Token(token) => token.as_transition_token_emergency_action(), } } + + fn as_transition_token_config_update(&self) -> Option<&TokenConfigUpdateTransition> { + match self { + BatchedTransition::Document(_) => None, + BatchedTransition::Token(token) => token.as_transition_token_config_update(), + } + } } impl<'a> BatchTransitionResolversV0 for BatchedTransitionRef<'a> { @@ -176,10 +191,24 @@ impl<'a> BatchTransitionResolversV0 for BatchedTransitionRef<'a> { } } + fn as_transition_token_claim(&self) -> Option<&TokenClaimTransition> { + match self { + BatchedTransitionRef::Document(_) => None, + BatchedTransitionRef::Token(token) => token.as_transition_token_claim(), + } + } + fn as_transition_token_emergency_action(&self) -> Option<&TokenEmergencyActionTransition> { match self { BatchedTransitionRef::Document(_) => None, BatchedTransitionRef::Token(token) => token.as_transition_token_emergency_action(), } } + + fn as_transition_token_config_update(&self) -> Option<&TokenConfigUpdateTransition> { + match self { + BatchedTransitionRef::Document(_) => None, + BatchedTransitionRef::Token(token) => token.as_transition_token_config_update(), + } + } } diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/mod.rs index 
5c07e9a97de..027f204cb82 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..9ae505888f0 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/validate_structure/mod.rs @@ -0,0 +1,35 @@ +use crate::state_transition::batch_transition::token_burn_transition::validate_structure::v0::TokenBurnTransitionActionStructureValidationV0; +use crate::state_transition::batch_transition::TokenBurnTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; +mod v0; + +pub trait TokenBurnTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenBurnTransitionStructureValidation for TokenBurnTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_burn_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenBurnTransition::validate_structure".to_string(), + known_versions: vec![0], + 
received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..e56c21e06e1 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_burn_transition/validate_structure/v0/mod.rs @@ -0,0 +1,39 @@ +use crate::consensus::basic::token::{InvalidTokenAmountError, InvalidTokenNoteTooBigError}; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_burn_transition::v0::v0_methods::TokenBurnTransitionV0Methods; +use crate::state_transition::batch_transition::TokenBurnTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenBurnTransitionActionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenBurnTransitionActionStructureValidationV0 for TokenBurnTransition { + fn validate_structure_v0(&self) -> Result { + if self.burn_amount() > i64::MAX as u64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenAmountError( + InvalidTokenAmountError::new(i64::MAX as u64, self.burn_amount()), + )), + )); + } + + if let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} 
diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/mod.rs new file mode 100644 index 00000000000..f142a897828 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/mod.rs @@ -0,0 +1,25 @@ +pub mod v0; +mod v0_methods; +pub mod validate_structure; + +use bincode::{Decode, Encode}; +use derive_more::{Display, From}; +#[cfg(feature = "state-transition-serde-conversion")] +use serde::{Deserialize, Serialize}; +pub use v0::TokenClaimTransitionV0; + +#[derive(Debug, Clone, Encode, Decode, PartialEq, Display, From)] +#[cfg_attr( + feature = "state-transition-serde-conversion", + derive(Serialize, Deserialize) +)] +pub enum TokenClaimTransition { + #[display("V0({})", "_0")] + V0(TokenClaimTransitionV0), +} + +impl Default for TokenClaimTransition { + fn default() -> Self { + TokenClaimTransition::V0(TokenClaimTransitionV0::default()) // since only v0 + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0/mod.rs new file mode 100644 index 00000000000..111728f77f8 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0/mod.rs @@ -0,0 +1,38 @@ +pub mod v0_methods; + +/// The Identifier fields in [`TokenClaimTransition`] +pub use super::super::document_base_transition::IDENTIFIER_FIELDS; +use crate::data_contract::associated_token::token_distribution_key::TokenDistributionType; +use 
crate::state_transition::batch_transition::token_base_transition::TokenBaseTransition; +use bincode::{Decode, Encode}; +#[cfg(feature = "state-transition-serde-conversion")] +use serde::{Deserialize, Serialize}; +use std::fmt; + +#[derive(Debug, Clone, Default, Encode, Decode, PartialEq)] +#[cfg_attr( + feature = "state-transition-serde-conversion", + derive(Serialize, Deserialize), + serde(rename_all = "camelCase") +)] +pub struct TokenClaimTransitionV0 { + /// Document Base Transition + #[cfg_attr(feature = "state-transition-serde-conversion", serde(flatten))] + pub base: TokenBaseTransition, + /// The type of distribution we are targeting + pub distribution_type: TokenDistributionType, + /// A public note, this will only get saved to the state if we are using a historical contract + pub public_note: Option, +} + +impl fmt::Display for TokenClaimTransitionV0 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "TokenClaimTransitionV0 {{ base: {}, distribution_type: {}, public_note: {} }}", + self.base, // Assuming TokenBaseTransition implements Display + self.distribution_type, + self.public_note.as_deref().unwrap_or("None") + ) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0/v0_methods.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0/v0_methods.rs new file mode 100644 index 00000000000..9e448d4256a --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0/v0_methods.rs @@ -0,0 +1,63 @@ +use crate::data_contract::associated_token::token_distribution_key::TokenDistributionType; +use crate::state_transition::batch_transition::batched_transition::token_claim_transition::TokenClaimTransitionV0; +use 
crate::state_transition::batch_transition::token_base_transition::token_base_transition_accessors::TokenBaseTransitionAccessors; +use crate::state_transition::batch_transition::token_base_transition::TokenBaseTransition; + +impl TokenBaseTransitionAccessors for TokenClaimTransitionV0 { + fn base(&self) -> &TokenBaseTransition { + &self.base + } + + fn base_mut(&mut self) -> &mut TokenBaseTransition { + &mut self.base + } + + fn set_base(&mut self, base: TokenBaseTransition) { + self.base = base; + } +} +pub trait TokenClaimTransitionV0Methods { + /// Returns the `distribution_type` field of the `TokenClaimTransitionV0`. + fn distribution_type(&self) -> TokenDistributionType; + + /// Returns the owned `distribution_type` field of the `TokenClaimTransitionV0`. + fn distribution_type_owned(self) -> TokenDistributionType; + + /// Sets the `distribution_type` field in the `TokenClaimTransitionV0`. + fn set_distribution_type(&mut self, distribution_type: TokenDistributionType); + + /// Returns the `public_note` field of the `TokenClaimTransitionV0`. + fn public_note(&self) -> Option<&String>; + + /// Returns the owned `public_note` field of the `TokenClaimTransitionV0`. + fn public_note_owned(self) -> Option; + + /// Sets the value of the `public_note` field in the `TokenClaimTransitionV0`. 
+ fn set_public_note(&mut self, public_note: Option); +} + +impl TokenClaimTransitionV0Methods for TokenClaimTransitionV0 { + fn distribution_type(&self) -> TokenDistributionType { + self.distribution_type.clone() + } + + fn distribution_type_owned(self) -> TokenDistributionType { + self.distribution_type + } + + fn set_distribution_type(&mut self, distribution_type: TokenDistributionType) { + self.distribution_type = distribution_type; + } + + fn public_note(&self) -> Option<&String> { + self.public_note.as_ref() + } + + fn public_note_owned(self) -> Option { + self.public_note + } + + fn set_public_note(&mut self, public_note: Option) { + self.public_note = public_note; + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0_methods.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0_methods.rs new file mode 100644 index 00000000000..63f70789867 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/v0_methods.rs @@ -0,0 +1,62 @@ +use crate::data_contract::associated_token::token_distribution_key::TokenDistributionType; +use crate::state_transition::batch_transition::token_base_transition::token_base_transition_accessors::TokenBaseTransitionAccessors; +use crate::state_transition::batch_transition::token_base_transition::TokenBaseTransition; +use crate::state_transition::batch_transition::token_claim_transition::v0::v0_methods::TokenClaimTransitionV0Methods; +use crate::state_transition::batch_transition::TokenClaimTransition; + +impl TokenBaseTransitionAccessors for TokenClaimTransition { + fn base(&self) -> &TokenBaseTransition { + match self { + TokenClaimTransition::V0(v0) => &v0.base, + } + } + + fn base_mut(&mut self) -> &mut TokenBaseTransition { + match self { + TokenClaimTransition::V0(v0) => &mut 
v0.base, + } + } + + fn set_base(&mut self, base: TokenBaseTransition) { + match self { + TokenClaimTransition::V0(v0) => v0.base = base, + } + } +} +impl TokenClaimTransitionV0Methods for TokenClaimTransition { + fn distribution_type(&self) -> TokenDistributionType { + match self { + TokenClaimTransition::V0(v0) => v0.distribution_type(), + } + } + + fn distribution_type_owned(self) -> TokenDistributionType { + match self { + TokenClaimTransition::V0(v0) => v0.distribution_type_owned(), + } + } + + fn set_distribution_type(&mut self, distribution_type: TokenDistributionType) { + match self { + TokenClaimTransition::V0(v0) => v0.set_distribution_type(distribution_type), + } + } + + fn public_note(&self) -> Option<&String> { + match self { + TokenClaimTransition::V0(v0) => v0.public_note(), + } + } + + fn public_note_owned(self) -> Option { + match self { + TokenClaimTransition::V0(v0) => v0.public_note_owned(), + } + } + + fn set_public_note(&mut self, public_note: Option) { + match self { + TokenClaimTransition::V0(v0) => v0.set_public_note(public_note), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..cb2442b324f --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/validate_structure/mod.rs @@ -0,0 +1,36 @@ +use crate::state_transition::batch_transition::token_claim_transition::validate_structure::v0::TokenClaimTransitionActionStructureValidationV0; +use crate::state_transition::batch_transition::TokenClaimTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; + +mod v0; + +pub 
trait TokenClaimTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenClaimTransitionStructureValidation for TokenClaimTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_claim_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenClaimTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..6bd29b50c81 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_claim_transition/validate_structure/v0/mod.rs @@ -0,0 +1,30 @@ +use crate::consensus::basic::token::InvalidTokenNoteTooBigError; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_claim_transition::v0::v0_methods::TokenClaimTransitionV0Methods; +use crate::state_transition::batch_transition::TokenClaimTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenClaimTransitionActionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenClaimTransitionActionStructureValidationV0 for TokenClaimTransition { + fn validate_structure_v0(&self) -> Result { + if let Some(public_note) = 
self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/mod.rs index f9e937cb3d3..518a06acc48 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..9e361bf8bb7 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/validate_structure/mod.rs @@ -0,0 +1,35 @@ +use platform_version::version::PlatformVersion; +use crate::ProtocolError; +use crate::state_transition::batch_transition::token_config_update_transition::validate_structure::v0::TokenConfigUpdateTransitionStructureValidationV0; +use 
crate::state_transition::batch_transition::TokenConfigUpdateTransition; +use crate::validation::SimpleConsensusValidationResult; +mod v0; + +pub trait TokenConfigUpdateTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenConfigUpdateTransitionStructureValidation for TokenConfigUpdateTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_config_update_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenConfigUpdateTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..85b7eafd98d --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_config_update_transition/validate_structure/v0/mod.rs @@ -0,0 +1,45 @@ +use crate::consensus::basic::token::{ + InvalidTokenConfigUpdateNoChangeError, InvalidTokenNoteTooBigError, +}; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; +use crate::state_transition::batch_transition::token_config_update_transition::v0::v0_methods::TokenConfigUpdateTransitionV0Methods; +use crate::state_transition::batch_transition::TokenConfigUpdateTransition; +use 
crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenConfigUpdateTransitionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenConfigUpdateTransitionStructureValidationV0 for TokenConfigUpdateTransition { + fn validate_structure_v0(&self) -> Result { + if matches!( + self.update_token_configuration_item(), + TokenConfigurationChangeItem::TokenConfigurationNoChange + ) { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenConfigUpdateNoChangeError( + InvalidTokenConfigUpdateNoChangeError::new(), + )), + )); + } + + if let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/mod.rs index 5d1cd01d774..606eb25cf56 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git 
a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..2b634a777eb --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/validate_structure/mod.rs @@ -0,0 +1,35 @@ +use platform_version::version::PlatformVersion; +use crate::ProtocolError; +use crate::state_transition::batch_transition::token_destroy_frozen_funds_transition::validate_structure::v0::TokenDestroyFrozenFundsTransitionStructureValidationV0; +use crate::state_transition::batch_transition::TokenDestroyFrozenFundsTransition; +use crate::validation::SimpleConsensusValidationResult; + +mod v0; + +pub trait TokenDestroyFrozenFundsTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} +impl TokenDestroyFrozenFundsTransitionStructureValidation for TokenDestroyFrozenFundsTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_destroy_frozen_funds_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenDestroyFrozenFundsTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/validate_structure/v0/mod.rs 
b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..e9eee6c1df9 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_destroy_frozen_funds_transition/validate_structure/v0/mod.rs @@ -0,0 +1,31 @@ +use crate::ProtocolError; +use crate::state_transition::batch_transition::TokenDestroyFrozenFundsTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::consensus::basic::BasicError; +use crate::consensus::basic::token::InvalidTokenNoteTooBigError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_destroy_frozen_funds_transition::v0::v0_methods::TokenDestroyFrozenFundsTransitionV0Methods; +use crate::tokens::MAX_TOKEN_NOTE_LEN; + +pub(super) trait TokenDestroyFrozenFundsTransitionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenDestroyFrozenFundsTransitionStructureValidationV0 for TokenDestroyFrozenFundsTransition { + fn validate_structure_v0(&self) -> Result { + if let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/mod.rs index b338e9e75eb..c75d6de3145 100644 --- 
a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..ef6212c5df0 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/validate_structure/mod.rs @@ -0,0 +1,36 @@ +use platform_version::version::PlatformVersion; +use crate::ProtocolError; +use crate::state_transition::batch_transition::token_emergency_action_transition::validate_structure::v0::TokenEmergencyActionTransitionStructureValidationV0; +use crate::state_transition::batch_transition::TokenEmergencyActionTransition; +use crate::validation::SimpleConsensusValidationResult; + +mod v0; + +pub trait TokenEmergencyActionTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenEmergencyActionTransitionStructureValidation for TokenEmergencyActionTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_emergency_action_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => 
Err(ProtocolError::UnknownVersionMismatch { + method: "TokenEmergencyActionTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..eb20e22b3b1 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_emergency_action_transition/validate_structure/v0/mod.rs @@ -0,0 +1,31 @@ +use crate::consensus::basic::token::InvalidTokenNoteTooBigError; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_emergency_action_transition::v0::v0_methods::TokenEmergencyActionTransitionV0Methods; +use crate::state_transition::batch_transition::TokenEmergencyActionTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenEmergencyActionTransitionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenEmergencyActionTransitionStructureValidationV0 for TokenEmergencyActionTransition { + fn validate_structure_v0(&self) -> Result { + if let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git 
a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/mod.rs index 844454a6610..69df4229a42 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..78c104c60cb --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/validate_structure/mod.rs @@ -0,0 +1,36 @@ +mod v0; + +use crate::state_transition::batch_transition::token_freeze_transition::validate_structure::v0::TokenFreezeTransitionStructureValidationV0; +use crate::state_transition::batch_transition::TokenFreezeTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; + +pub trait TokenFreezeTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenFreezeTransitionStructureValidation for TokenFreezeTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + 
.validation_and_processing + .state_transitions + .batch_state_transition + .token_freeze_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenFreezeTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..d17507f87b7 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_freeze_transition/validate_structure/v0/mod.rs @@ -0,0 +1,31 @@ +use crate::consensus::basic::token::InvalidTokenNoteTooBigError; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_freeze_transition::v0::v0_methods::TokenFreezeTransitionV0Methods; +use crate::state_transition::batch_transition::TokenFreezeTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenFreezeTransitionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenFreezeTransitionStructureValidationV0 for TokenFreezeTransition { + fn validate_structure_v0(&self) -> Result { + if let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + 
Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/mod.rs index a37fe199c3a..26e150657a6 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..5858e0b50bc --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/validate_structure/mod.rs @@ -0,0 +1,36 @@ +use crate::state_transition::batch_transition::token_mint_transition::validate_structure::v0::TokenMintTransitionActionStructureValidationV0; +use crate::state_transition::batch_transition::TokenMintTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; + +mod v0; + +pub trait TokenMintTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenMintTransitionStructureValidation for TokenMintTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match 
platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_mint_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenMintTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..8c4fe050de8 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_mint_transition/validate_structure/v0/mod.rs @@ -0,0 +1,38 @@ +use crate::consensus::basic::token::{InvalidTokenAmountError, InvalidTokenNoteTooBigError}; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_mint_transition::v0::v0_methods::TokenMintTransitionV0Methods; +use crate::state_transition::batch_transition::TokenMintTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenMintTransitionActionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenMintTransitionActionStructureValidationV0 for TokenMintTransition { + fn validate_structure_v0(&self) -> Result { + if self.amount() > i64::MAX as u64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenAmountError( + InvalidTokenAmountError::new(i64::MAX as u64, self.amount()), + )), + )); + } + + if let Some(public_note) = self.public_note() { + if public_note.len() > 
MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/mod.rs index 39c8a54695f..cd292c2e292 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; pub mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..308db8fdb2d --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/validate_structure/mod.rs @@ -0,0 +1,38 @@ +use crate::state_transition::batch_transition::token_transfer_transition::validate_structure::v0::TokenTransferTransitionActionStructureValidationV0; +use crate::state_transition::batch_transition::TokenTransferTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +use platform_value::Identifier; +use 
platform_version::version::PlatformVersion; + +mod v0; + +pub trait TokenTransferTransitionStructureValidation { + fn validate_structure( + &self, + owner_id: Identifier, + platform_version: &PlatformVersion, + ) -> Result; +} +impl TokenTransferTransitionStructureValidation for TokenTransferTransition { + fn validate_structure( + &self, + owner_id: Identifier, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_transfer_transition_structure_validation + { + 0 => self.validate_structure_v0(owner_id), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenTransferTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..dc5ebb70621 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transfer_transition/validate_structure/v0/mod.rs @@ -0,0 +1,84 @@ +use platform_value::Identifier; +use crate::consensus::basic::BasicError; +use crate::consensus::basic::token::{InvalidTokenAmountError, InvalidTokenNoteTooBigError, TokenTransferToOurselfError}; +use crate::consensus::ConsensusError; +use crate::ProtocolError; +use crate::state_transition::batch_transition::token_base_transition::token_base_transition_accessors::TokenBaseTransitionAccessors; +use crate::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; +use 
crate::state_transition::batch_transition::token_transfer_transition::v0::v0_methods::TokenTransferTransitionV0Methods; +use crate::state_transition::batch_transition::TokenTransferTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; + +pub(super) trait TokenTransferTransitionActionStructureValidationV0 { + fn validate_structure_v0( + &self, + owner_id: Identifier, + ) -> Result; +} +impl TokenTransferTransitionActionStructureValidationV0 for TokenTransferTransition { + fn validate_structure_v0( + &self, + owner_id: Identifier, + ) -> Result { + if self.amount() > i64::MAX as u64 { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenAmountError( + InvalidTokenAmountError::new(i64::MAX as u64, self.amount()), + )), + )); + } + + if self.recipient_id() == owner_id { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::TokenTransferToOurselfError( + TokenTransferToOurselfError::new(self.base().token_id(), owner_id), + )), + )); + } + + if let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + if let Some(shared_encrypted_note) = self.shared_encrypted_note() { + if shared_encrypted_note.2.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "shared_encrypted_note", + shared_encrypted_note.2.len() as u32, + ), + )), + )); + } + } + + if let Some(private_encrypted_note) = self.private_encrypted_note() { + if 
private_encrypted_note.2.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "private_encrypted_note", + private_encrypted_note.2.len() as u32, + ), + )), + )); + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition.rs index 255c968c369..c9162808e88 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition.rs @@ -9,13 +9,16 @@ use crate::block::block_info::BlockInfo; use crate::data_contract::accessors::v0::DataContractV0Getters; use crate::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; use crate::data_contract::associated_token::token_configuration::TokenConfiguration; +use crate::data_contract::associated_token::token_distribution_key::{TokenDistributionType, TokenDistributionTypeWithResolvedRecipient}; use crate::data_contract::associated_token::token_distribution_rules::accessors::v0::TokenDistributionRulesV0Getters; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::{TokenDistributionRecipient, TokenDistributionResolvedRecipient}; +use crate::data_contract::associated_token::token_perpetual_distribution::methods::v0::TokenPerpetualDistributionV0Accessors; use crate::data_contract::DataContract; use crate::data_contract::document_type::DocumentTypeRef; use crate::document::Document; use crate::prelude::IdentityNonce; use 
crate::ProtocolError; -use crate::state_transition::batch_transition::{DocumentCreateTransition, DocumentDeleteTransition, DocumentReplaceTransition, TokenBurnTransition, TokenConfigUpdateTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, TokenMintTransition, TokenTransferTransition}; +use crate::state_transition::batch_transition::{DocumentCreateTransition, DocumentDeleteTransition, DocumentReplaceTransition, TokenBurnTransition, TokenConfigUpdateTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, TokenMintTransition, TokenClaimTransition, TokenTransferTransition}; use crate::state_transition::batch_transition::batched_transition::{DocumentPurchaseTransition, DocumentTransferTransition}; use crate::state_transition::batch_transition::batched_transition::multi_party_action::AllowedAsMultiPartyAction; use crate::state_transition::batch_transition::batched_transition::token_unfreeze_transition::TokenUnfreezeTransition; @@ -29,6 +32,7 @@ use crate::state_transition::batch_transition::token_destroy_frozen_funds_transi use crate::state_transition::batch_transition::token_emergency_action_transition::v0::v0_methods::TokenEmergencyActionTransitionV0Methods; use crate::state_transition::batch_transition::token_freeze_transition::v0::v0_methods::TokenFreezeTransitionV0Methods; use crate::state_transition::batch_transition::token_mint_transition::v0::v0_methods::TokenMintTransitionV0Methods; +use crate::state_transition::batch_transition::token_claim_transition::v0::v0_methods::TokenClaimTransitionV0Methods; use crate::state_transition::batch_transition::token_transfer_transition::v0::v0_methods::TokenTransferTransitionV0Methods; use crate::state_transition::batch_transition::token_unfreeze_transition::v0::v0_methods::TokenUnfreezeTransitionV0Methods; use crate::tokens::token_event::TokenEvent; @@ -62,6 +66,9 @@ pub enum TokenTransition { 
#[display("TokenDestroyFrozenFundsTransition({})", "_0")] DestroyFrozenFunds(TokenDestroyFrozenFundsTransition), + #[display("TokenClaimTransition({})", "_0")] + Claim(TokenClaimTransition), + #[display("TokenEmergencyActionTransition({})", "_0")] EmergencyAction(TokenEmergencyActionTransition), @@ -138,6 +145,14 @@ impl BatchTransitionResolversV0 for TokenTransition { } } + fn as_transition_token_claim(&self) -> Option<&TokenClaimTransition> { + if let Self::Claim(ref t) = self { + Some(t) + } else { + None + } + } + fn as_transition_token_emergency_action(&self) -> Option<&TokenEmergencyActionTransition> { if let Self::EmergencyAction(ref t) = self { Some(t) @@ -145,6 +160,14 @@ impl BatchTransitionResolversV0 for TokenTransition { None } } + + fn as_transition_token_config_update(&self) -> Option<&TokenConfigUpdateTransition> { + if let Self::ConfigUpdate(ref t) = self { + Some(t) + } else { + None + } + } } pub trait TokenTransitionV0Methods { @@ -185,6 +208,7 @@ pub trait TokenTransitionV0Methods { fn associated_token_event( &self, token_configuration: &TokenConfiguration, + contract_owner_id: Identifier, ) -> Result; /// Historical document id fn build_historical_document( @@ -208,6 +232,7 @@ impl TokenTransitionV0Methods for TokenTransition { TokenTransition::Freeze(t) => t.base(), TokenTransition::Unfreeze(t) => t.base(), TokenTransition::DestroyFrozenFunds(t) => t.base(), + TokenTransition::Claim(t) => t.base(), TokenTransition::EmergencyAction(t) => t.base(), TokenTransition::ConfigUpdate(t) => t.base(), } @@ -221,6 +246,7 @@ impl TokenTransitionV0Methods for TokenTransition { TokenTransition::Freeze(t) => t.base_mut(), TokenTransition::Unfreeze(t) => t.base_mut(), TokenTransition::DestroyFrozenFunds(t) => t.base_mut(), + TokenTransition::Claim(t) => t.base_mut(), TokenTransition::EmergencyAction(t) => t.base_mut(), TokenTransition::ConfigUpdate(t) => t.base_mut(), } @@ -238,6 +264,7 @@ impl TokenTransitionV0Methods for TokenTransition { 
TokenTransition::Unfreeze(t) => Some(t.calculate_action_id(owner_id)), TokenTransition::Transfer(_) => None, TokenTransition::DestroyFrozenFunds(t) => Some(t.calculate_action_id(owner_id)), + TokenTransition::Claim(_) => None, TokenTransition::EmergencyAction(t) => Some(t.calculate_action_id(owner_id)), TokenTransition::ConfigUpdate(t) => Some(t.calculate_action_id(owner_id)), } @@ -252,7 +279,7 @@ impl TokenTransitionV0Methods for TokenTransition { | TokenTransition::DestroyFrozenFunds(_) | TokenTransition::EmergencyAction(_) | TokenTransition::ConfigUpdate(_) => true, - TokenTransition::Transfer(_) => false, + TokenTransition::Transfer(_) | TokenTransition::Claim(_) => false, } } @@ -287,6 +314,7 @@ impl TokenTransitionV0Methods for TokenTransition { TokenTransition::EmergencyAction(_) => "emergencyAction", TokenTransition::DestroyFrozenFunds(_) => "destroyFrozenFunds", TokenTransition::ConfigUpdate(_) => "configUpdate", + TokenTransition::Claim(_) => "claim", } } @@ -324,7 +352,7 @@ impl TokenTransitionV0Methods for TokenTransition { token_configuration: &TokenConfiguration, platform_version: &PlatformVersion, ) -> Result { - self.associated_token_event(token_configuration)? + self.associated_token_event(token_configuration, owner_id)? 
.build_historical_document_owned( token_historical_contract, token_id, @@ -338,6 +366,7 @@ impl TokenTransitionV0Methods for TokenTransition { fn associated_token_event( &self, token_configuration: &TokenConfiguration, + owner_id: Identifier, ) -> Result { Ok(match self { TokenTransition::Burn(burn) => { @@ -382,6 +411,44 @@ impl TokenTransitionV0Methods for TokenTransition { config_update.update_token_configuration_item().clone(), config_update.public_note().cloned(), ), + TokenTransition::Claim(claim) => { + let distribution_rules = token_configuration.distribution_rules(); + let distribution_recipient = match claim.distribution_type() { + TokenDistributionType::PreProgrammed => { + if distribution_rules.pre_programmed_distribution().is_none() { + return Err(ProtocolError::NotSupported("Token claiming of pre programmed distribution is not supported on this token".to_string())); + } + TokenDistributionTypeWithResolvedRecipient::PreProgrammed(owner_id) + } + TokenDistributionType::Perpetual => { + let Some(perpetual_distribution) = + distribution_rules.perpetual_distribution() + else { + return Err(ProtocolError::NotSupported("Token claiming of perpetual distribution is not supported on this token".to_string())); + }; + let recipient = match perpetual_distribution.distribution_recipient() { + TokenDistributionRecipient::ContractOwner => { + TokenDistributionResolvedRecipient::ContractOwnerIdentity(owner_id) + } + TokenDistributionRecipient::Identity(identifier) => { + TokenDistributionResolvedRecipient::ContractOwnerIdentity( + identifier, + ) + } + TokenDistributionRecipient::EvonodesByParticipation => { + TokenDistributionResolvedRecipient::Evonode(owner_id) + } + }; + TokenDistributionTypeWithResolvedRecipient::Perpetual(recipient) + } + }; + + TokenEvent::Claim( + distribution_recipient, + TokenAmount::MAX, // we do not know how much will be released + claim.public_note().cloned(), + ) + } }) } } diff --git 
a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition_action_type.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition_action_type.rs index 818c492021a..ea464d5e88d 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition_action_type.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_transition_action_type.rs @@ -11,6 +11,7 @@ pub enum TokenTransitionActionType { Freeze, Unfreeze, DestroyFrozenFunds, + Claim, EmergencyAction, ConfigUpdate, } @@ -24,6 +25,7 @@ impl fmt::Display for TokenTransitionActionType { TokenTransitionActionType::Freeze => "Freeze", TokenTransitionActionType::Unfreeze => "Unfreeze", TokenTransitionActionType::DestroyFrozenFunds => "DestroyFrozenFunds", + TokenTransitionActionType::Claim => "Claim", TokenTransitionActionType::EmergencyAction => "EmergencyAction", TokenTransitionActionType::ConfigUpdate => "ConfigUpdate", }; @@ -44,6 +46,7 @@ impl TokenTransitionActionTypeGetter for TokenTransition { TokenTransition::Freeze(_) => TokenTransitionActionType::Freeze, TokenTransition::Unfreeze(_) => TokenTransitionActionType::Unfreeze, TokenTransition::DestroyFrozenFunds(_) => TokenTransitionActionType::DestroyFrozenFunds, + TokenTransition::Claim(_) => TokenTransitionActionType::Claim, TokenTransition::EmergencyAction(_) => TokenTransitionActionType::EmergencyAction, TokenTransition::ConfigUpdate(_) => TokenTransitionActionType::ConfigUpdate, } @@ -60,12 +63,14 @@ impl TryFrom<&str> for TokenTransitionActionType { "transfer" => Ok(TokenTransitionActionType::Transfer), "freeze" => Ok(TokenTransitionActionType::Freeze), "unfreeze" => Ok(TokenTransitionActionType::Unfreeze), + "claim" => Ok(TokenTransitionActionType::Claim), "destroy_frozen_funds" | "destroyFrozenFunds" => { 
Ok(TokenTransitionActionType::DestroyFrozenFunds) } "emergency_action" | "emergencyAction" => { Ok(TokenTransitionActionType::EmergencyAction) } + "config_update" | "configUpdate" => Ok(TokenTransitionActionType::ConfigUpdate), action_type => Err(ProtocolError::Generic(format!( "unknown token transition action type {action_type}" ))), diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/mod.rs index 4cefde0effb..6b9f3c392cb 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/mod.rs @@ -1,5 +1,6 @@ pub mod v0; mod v0_methods; +pub mod validate_structure; use bincode::{Decode, Encode}; use derive_more::{Display, From}; diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/validate_structure/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/validate_structure/mod.rs new file mode 100644 index 00000000000..5dbf45eb1fd --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/validate_structure/mod.rs @@ -0,0 +1,36 @@ +mod v0; + +use crate::state_transition::batch_transition::token_unfreeze_transition::validate_structure::v0::TokenUnfreezeTransitionStructureValidationV0; +use crate::state_transition::batch_transition::TokenUnfreezeTransition; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; + 
+pub trait TokenUnfreezeTransitionStructureValidation { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenUnfreezeTransitionStructureValidation for TokenUnfreezeTransition { + fn validate_structure( + &self, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_unfreeze_transition_structure_validation + { + 0 => self.validate_structure_v0(), + version => Err(ProtocolError::UnknownVersionMismatch { + method: "TokenUnfreezeTransition::validate_structure".to_string(), + known_versions: vec![0], + received: version, + }), + } + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/validate_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/validate_structure/v0/mod.rs new file mode 100644 index 00000000000..d02ab5e0dd1 --- /dev/null +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/batched_transition/token_unfreeze_transition/validate_structure/v0/mod.rs @@ -0,0 +1,31 @@ +use crate::consensus::basic::token::InvalidTokenNoteTooBigError; +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::state_transition::batch_transition::token_unfreeze_transition::v0::v0_methods::TokenUnfreezeTransitionV0Methods; +use crate::state_transition::batch_transition::TokenUnfreezeTransition; +use crate::tokens::MAX_TOKEN_NOTE_LEN; +use crate::validation::SimpleConsensusValidationResult; +use crate::ProtocolError; + +pub(super) trait TokenUnfreezeTransitionStructureValidationV0 { + fn validate_structure_v0(&self) -> Result; +} +impl TokenUnfreezeTransitionStructureValidationV0 for TokenUnfreezeTransition { + fn validate_structure_v0(&self) -> Result { + if 
let Some(public_note) = self.public_note() { + if public_note.len() > MAX_TOKEN_NOTE_LEN { + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::BasicError(BasicError::InvalidTokenNoteTooBigError( + InvalidTokenNoteTooBigError::new( + MAX_TOKEN_NOTE_LEN as u32, + "public_note", + public_note.len() as u32, + ), + )), + )); + } + } + + Ok(SimpleConsensusValidationResult::default()) + } +} diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/mod.rs index 5c7a5381495..11de4e36561 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/mod.rs @@ -2,6 +2,7 @@ use crate::balances::credits::TokenAmount; #[cfg(feature = "state-transition-signing")] use crate::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; +use crate::data_contract::associated_token::token_distribution_key::TokenDistributionType; #[cfg(feature = "state-transition-signing")] use crate::data_contract::document_type::DocumentTypeRef; #[cfg(feature = "state-transition-signing")] @@ -916,4 +917,62 @@ impl DocumentsBatchTransitionMethodsV1 for BatchTransition { }), } } + + #[cfg(feature = "state-transition-signing")] + fn new_token_claim_transition( + token_id: Identifier, + owner_id: Identifier, + data_contract_id: Identifier, + token_contract_position: u16, + distribution_type: TokenDistributionType, + public_note: Option, + identity_public_key: &IdentityPublicKey, + identity_contract_nonce: IdentityNonce, + user_fee_increase: UserFeeIncrease, + signer: &S, + platform_version: &PlatformVersion, + batch_feature_version: Option, + config_update_feature_version: Option, + base_feature_version: Option, + ) -> Result { + match batch_feature_version.unwrap_or( + 
platform_version + .dpp + .state_transition_serialization_versions + .batch_state_transition + .default_current_version, + ) { + 1 | 0 + if platform_version + .dpp + .state_transition_serialization_versions + .batch_state_transition + .max_version + >= 1 => + { + // Create the emergency action transition for batch version 1 + BatchTransitionV1::new_token_claim_transition( + token_id, + owner_id, + data_contract_id, + token_contract_position, + distribution_type, + public_note, + identity_public_key, + identity_contract_nonce, + user_fee_increase, + signer, + platform_version, + batch_feature_version, + config_update_feature_version, + base_feature_version, + ) + } + version => Err(ProtocolError::UnknownVersionMismatch { + method: "DocumentsBatchTransition::new_token_claim_transition".to_string(), + known_versions: vec![1], + received: version, + }), + } + } } diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/v1/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/v1/mod.rs index f17e54aa2bc..05e5806387a 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/v1/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/methods/v1/mod.rs @@ -3,6 +3,8 @@ use crate::balances::credits::TokenAmount; #[cfg(feature = "state-transition-signing")] use crate::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; #[cfg(feature = "state-transition-signing")] +use crate::data_contract::associated_token::token_distribution_key::TokenDistributionType; +#[cfg(feature = "state-transition-signing")] use crate::group::GroupStateTransitionInfoStatus; #[cfg(feature = "state-transition-signing")] use crate::identity::signer::Signer; @@ -181,4 +183,22 @@ pub trait DocumentsBatchTransitionMethodsV1: DocumentsBatchTransitionAccessorsV0 config_update_feature_version: Option, 
base_feature_version: Option, ) -> Result; + + #[cfg(feature = "state-transition-signing")] + fn new_token_claim_transition( + token_id: Identifier, + owner_id: Identifier, + data_contract_id: Identifier, + token_contract_position: u16, + distribution_type: TokenDistributionType, + public_note: Option, + identity_public_key: &IdentityPublicKey, + identity_contract_nonce: IdentityNonce, + user_fee_increase: UserFeeIncrease, + signer: &S, + platform_version: &PlatformVersion, + batch_feature_version: Option, + config_update_feature_version: Option, + base_feature_version: Option, + ) -> Result; } diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/mod.rs index 4b33e2e79e5..4beb0386c26 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/mod.rs @@ -16,8 +16,9 @@ pub use self::batched_transition::{ document_create_transition::DocumentCreateTransition, document_delete_transition, document_delete_transition::DocumentDeleteTransition, document_replace_transition, document_replace_transition::DocumentReplaceTransition, token_base_transition, - token_burn_transition, token_burn_transition::TokenBurnTransition, - token_config_update_transition, token_config_update_transition::TokenConfigUpdateTransition, + token_burn_transition, token_burn_transition::TokenBurnTransition, token_claim_transition, + token_claim_transition::TokenClaimTransition, token_config_update_transition, + token_config_update_transition::TokenConfigUpdateTransition, token_destroy_frozen_funds_transition, token_destroy_frozen_funds_transition::TokenDestroyFrozenFundsTransition, token_emergency_action_transition, diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/resolvers/v0/mod.rs 
b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/resolvers/v0/mod.rs index 8dd6fd6ec1c..687ece25889 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/resolvers/v0/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/resolvers/v0/mod.rs @@ -4,8 +4,9 @@ use crate::state_transition::batch_transition::batched_transition::{ }; use crate::state_transition::batch_transition::{ DocumentCreateTransition, DocumentDeleteTransition, DocumentReplaceTransition, - TokenBurnTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, - TokenFreezeTransition, TokenMintTransition, TokenTransferTransition, + TokenBurnTransition, TokenClaimTransition, TokenConfigUpdateTransition, + TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, + TokenMintTransition, TokenTransferTransition, }; pub trait BatchTransitionResolversV0 { @@ -22,5 +23,9 @@ pub trait BatchTransitionResolversV0 { fn as_transition_token_destroy_frozen_funds( &self, ) -> Option<&TokenDestroyFrozenFundsTransition>; + + fn as_transition_token_claim(&self) -> Option<&TokenClaimTransition>; fn as_transition_token_emergency_action(&self) -> Option<&TokenEmergencyActionTransition>; + + fn as_transition_token_config_update(&self) -> Option<&TokenConfigUpdateTransition>; } diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/v1/v0_methods.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/v1/v0_methods.rs index fc3bb2eb527..b1444757054 100644 --- a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/v1/v0_methods.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/v1/v0_methods.rs @@ -27,7 +27,9 @@ use crate::state_transition::batch_transition::methods::v0::DocumentsBatchTransi use std::iter::Map; use std::slice::Iter; 
-use crate::state_transition::batch_transition::{BatchTransitionV1, TokenBurnTransition, TokenConfigUpdateTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, TokenMintTransition, TokenTransferTransition, TokenUnfreezeTransition}; +use crate::state_transition::batch_transition::{BatchTransitionV1, TokenClaimTransition}; +#[cfg(feature = "state-transition-signing")] +use crate::state_transition::batch_transition::{TokenBurnTransition, TokenConfigUpdateTransition, TokenDestroyFrozenFundsTransition, TokenEmergencyActionTransition, TokenFreezeTransition, TokenMintTransition, TokenTransferTransition, TokenUnfreezeTransition}; #[cfg(feature = "state-transition-signing")] use crate::state_transition::batch_transition::{ BatchTransition, DocumentDeleteTransition, @@ -43,6 +45,7 @@ use platform_version::version::{FeatureVersion, PlatformVersion}; use crate::balances::credits::TokenAmount; #[cfg(feature = "state-transition-signing")] use crate::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; +use crate::data_contract::associated_token::token_distribution_key::TokenDistributionType; #[cfg(feature = "state-transition-signing")] use crate::group::{GroupStateTransitionInfo, GroupStateTransitionInfoStatus}; use crate::state_transition::batch_transition::document_create_transition::v0::v0_methods::DocumentCreateTransitionV0Methods; @@ -61,6 +64,7 @@ use crate::state_transition::batch_transition::token_base_transition::v0::TokenB use crate::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; #[cfg(feature = "state-transition-signing")] use crate::state_transition::batch_transition::token_burn_transition::TokenBurnTransitionV0; +use crate::state_transition::batch_transition::token_claim_transition::TokenClaimTransitionV0; #[cfg(feature = "state-transition-signing")] use 
crate::state_transition::batch_transition::token_config_update_transition::TokenConfigUpdateTransitionV0; #[cfg(feature = "state-transition-signing")] @@ -974,4 +978,50 @@ impl DocumentsBatchTransitionMethodsV1 for BatchTransitionV1 { )?; Ok(state_transition) } + + #[cfg(feature = "state-transition-signing")] + fn new_token_claim_transition( + token_id: Identifier, + owner_id: Identifier, + data_contract_id: Identifier, + token_contract_position: u16, + distribution_type: TokenDistributionType, + public_note: Option, + identity_public_key: &IdentityPublicKey, + identity_contract_nonce: IdentityNonce, + user_fee_increase: UserFeeIncrease, + signer: &S, + _platform_version: &PlatformVersion, + _batch_feature_version: Option, + _config_update_feature_version: Option, + _base_feature_version: Option, + ) -> Result { + let claim_transition = TokenClaimTransition::V0(TokenClaimTransitionV0 { + base: TokenBaseTransition::V0(TokenBaseTransitionV0 { + identity_contract_nonce, + token_contract_position, + data_contract_id, + token_id, + using_group_info: None, + }), + distribution_type, + public_note, + }); + + let batch_transition: BatchTransition = BatchTransitionV1 { + owner_id, + transitions: vec![BatchedTransition::Token(claim_transition.into())], + user_fee_increase, + signature_public_key_id: 0, + signature: Default::default(), + } + .into(); + let mut state_transition: StateTransition = batch_transition.into(); + state_transition.sign_external( + identity_public_key, + signer, + Some(|_, _| Ok(SecurityLevel::CRITICAL)), + )?; + Ok(state_transition) + } } diff --git a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/validation/validate_basic_structure/v0/mod.rs b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/validation/validate_basic_structure/v0/mod.rs index fa69e5a93ff..3ab68d5219b 100644 --- 
a/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/validation/validate_basic_structure/v0/mod.rs +++ b/packages/rs-dpp/src/state_transition/state_transitions/document/batch_transition/validation/validate_basic_structure/v0/mod.rs @@ -16,14 +16,22 @@ use platform_version::version::PlatformVersion; use std::collections::btree_map::Entry; use std::collections::BTreeMap; use crate::consensus::basic::group::GroupActionNotAllowedOnTransitionError; -use crate::consensus::basic::token::{InvalidActionIdError, InvalidTokenIdError, TokenTransferToOurselfError}; +use crate::consensus::basic::token::{InvalidActionIdError, InvalidTokenIdError}; use crate::state_transition::batch_transition::batched_transition::BatchedTransitionRef; use crate::state_transition::batch_transition::batched_transition::token_transition::{TokenTransition, TokenTransitionV0Methods}; use crate::state_transition::batch_transition::batched_transition::token_transition_action_type::TokenTransitionActionTypeGetter; use crate::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; -use crate::state_transition::batch_transition::token_transfer_transition::v0::v0_methods::TokenTransferTransitionV0Methods; +use crate::state_transition::batch_transition::token_config_update_transition::validate_structure::TokenConfigUpdateTransitionStructureValidation; +use crate::state_transition::batch_transition::token_destroy_frozen_funds_transition::validate_structure::TokenDestroyFrozenFundsTransitionStructureValidation; +use crate::state_transition::batch_transition::token_emergency_action_transition::validate_structure::TokenEmergencyActionTransitionStructureValidation; +use crate::state_transition::batch_transition::token_freeze_transition::validate_structure::TokenFreezeTransitionStructureValidation; +use crate::state_transition::batch_transition::token_mint_transition::validate_structure::TokenMintTransitionStructureValidation; +use 
crate::state_transition::batch_transition::token_claim_transition::validate_structure::TokenClaimTransitionStructureValidation; +use crate::state_transition::batch_transition::token_transfer_transition::validate_structure::TokenTransferTransitionStructureValidation; +use crate::state_transition::batch_transition::token_unfreeze_transition::validate_structure::TokenUnfreezeTransitionStructureValidation; use crate::state_transition::state_transitions::document::batch_transition::batched_transition::document_transition::{DocumentTransition, DocumentTransitionV0Methods}; use crate::state_transition::StateTransitionLike; +use crate::state_transition::state_transitions::document::batch_transition::batched_transition::token_burn_transition::validate_structure::TokenBurnTransitionStructureValidation; impl BatchTransition { #[inline(always)] @@ -134,25 +142,38 @@ impl BatchTransition { ))); } - match transition { - TokenTransition::Burn(_) => {} - TokenTransition::Mint(_) => {} - TokenTransition::Transfer(transfer) => { - if transfer.recipient_id() == self.owner_id() { - // We can not transfer to ourselves - result.add_error(BasicError::TokenTransferToOurselfError( - TokenTransferToOurselfError::new( - transition.token_id(), - self.owner_id(), - ), - )); - } + let consensus_result = match transition { + TokenTransition::Burn(burn_transition) => { + burn_transition.validate_structure(platform_version)? + } + TokenTransition::Mint(mint_transition) => { + mint_transition.validate_structure(platform_version)? + } + TokenTransition::Transfer(transfer_transition) => { + transfer_transition.validate_structure(self.owner_id(), platform_version)? + } + TokenTransition::Freeze(freeze_transition) => { + freeze_transition.validate_structure(platform_version)? 
} - TokenTransition::Freeze(_) => {} - TokenTransition::Unfreeze(_) => {} - TokenTransition::DestroyFrozenFunds(_) => {} - TokenTransition::EmergencyAction(_) => {} - TokenTransition::ConfigUpdate(_) => {} + TokenTransition::Unfreeze(unfreeze_transition) => { + unfreeze_transition.validate_structure(platform_version)? + } + TokenTransition::DestroyFrozenFunds(destroy_frozen_funds_transition) => { + destroy_frozen_funds_transition.validate_structure(platform_version)? + } + TokenTransition::EmergencyAction(emergency_action_transition) => { + emergency_action_transition.validate_structure(platform_version)? + } + TokenTransition::ConfigUpdate(config_update_transition) => { + config_update_transition.validate_structure(platform_version)? + } + TokenTransition::Claim(release_transition) => { + release_transition.validate_structure(platform_version)? + } + }; + + if !consensus_result.is_valid() { + return Ok(consensus_result); } // We need to verify that the action id given matches the expected action id diff --git a/packages/rs-dpp/src/tokens/mod.rs b/packages/rs-dpp/src/tokens/mod.rs index b05c9bdbd95..7ef624e086f 100644 --- a/packages/rs-dpp/src/tokens/mod.rs +++ b/packages/rs-dpp/src/tokens/mod.rs @@ -11,6 +11,7 @@ pub mod info; pub mod status; pub mod token_event; +pub const MAX_TOKEN_NOTE_LEN: usize = 2048; pub type SharedEncryptedNote = (SenderKeyIndex, RecipientKeyIndex, Vec); pub type PrivateEncryptedNote = ( RootEncryptionKeyIndex, diff --git a/packages/rs-dpp/src/tokens/token_event.rs b/packages/rs-dpp/src/tokens/token_event.rs index 82c72825d70..fc2f38054da 100644 --- a/packages/rs-dpp/src/tokens/token_event.rs +++ b/packages/rs-dpp/src/tokens/token_event.rs @@ -2,6 +2,8 @@ use crate::balances::credits::TokenAmount; use crate::block::block_info::BlockInfo; use crate::data_contract::accessors::v0::DataContractV0Getters; use crate::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; +use 
crate::data_contract::associated_token::token_distribution_key::TokenDistributionTypeWithResolvedRecipient; +use crate::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionResolvedRecipient; use crate::data_contract::document_type::DocumentTypeRef; use crate::document::{Document, DocumentV0}; use crate::prelude::{ @@ -45,6 +47,11 @@ pub enum TokenEvent { TokenEventPersonalEncryptedNote, TokenAmount, ), + Claim( + TokenDistributionTypeWithResolvedRecipient, + TokenAmount, + TokenEventPublicNote, + ), EmergencyAction(TokenEmergencyAction, TokenEventPublicNote), ConfigUpdate(TokenConfigurationChangeItem, TokenEventPublicNote), } @@ -52,14 +59,15 @@ pub enum TokenEvent { impl TokenEvent { pub fn associated_document_type_name(&self) -> &str { match self { - TokenEvent::Mint(_, _, _) => "mint", - TokenEvent::Burn(_, _) => "burn", - TokenEvent::Freeze(_, _) => "freeze", - TokenEvent::Unfreeze(_, _) => "unfreeze", - TokenEvent::DestroyFrozenFunds(_, _, _) => "destroyFrozenFunds", - TokenEvent::Transfer(_, _, _, _, _) => "transfer", - TokenEvent::EmergencyAction(_, _) => "emergencyAction", - TokenEvent::ConfigUpdate(_, _) => "configUpdate", + TokenEvent::Mint(..) => "mint", + TokenEvent::Burn(..) => "burn", + TokenEvent::Freeze(..) => "freeze", + TokenEvent::Unfreeze(..) => "unfreeze", + TokenEvent::DestroyFrozenFunds(..) => "destroyFrozenFunds", + TokenEvent::Transfer(..) => "transfer", + TokenEvent::Claim(..) => "claim", + TokenEvent::EmergencyAction(..) => "emergencyAction", + TokenEvent::ConfigUpdate(..) 
=> "configUpdate", } } @@ -206,6 +214,38 @@ impl TokenEvent { } properties } + TokenEvent::Claim(recipient, amount, public_note) => { + let (recipient_type, recipient_id, distribution_type) = match recipient { + TokenDistributionTypeWithResolvedRecipient::PreProgrammed(identifier) => { + (1u8, Some(identifier.into()), 0u8) + } + TokenDistributionTypeWithResolvedRecipient::Perpetual( + TokenDistributionResolvedRecipient::ContractOwnerIdentity(identifier), + ) => (0, Some(identifier.into()), 1), + TokenDistributionTypeWithResolvedRecipient::Perpetual( + TokenDistributionResolvedRecipient::Identity(identifier), + ) => (1, Some(identifier.into()), 1), + TokenDistributionTypeWithResolvedRecipient::Perpetual( + TokenDistributionResolvedRecipient::Evonode(identifier), + ) => (2, Some(identifier.into()), 1), + }; + + let mut properties = BTreeMap::from([ + ("tokenId".to_string(), token_id.into()), + ("recipientType".to_string(), recipient_type.into()), + ("distributionType".to_string(), distribution_type.into()), + ("amount".to_string(), amount.into()), + ]); + + if let Some(id) = recipient_id { + properties.insert("recipientId".to_string(), id); + } + + if let Some(note) = public_note { + properties.insert("note".to_string(), note.into()); + } + properties + } }; let document: Document = DocumentV0 { diff --git a/packages/rs-drive-abci/Cargo.toml b/packages/rs-drive-abci/Cargo.toml index 3b9bd54a61f..f973b2cea8e 100644 --- a/packages/rs-drive-abci/Cargo.toml +++ b/packages/rs-drive-abci/Cargo.toml @@ -119,6 +119,5 @@ grovedbg = ["drive/grovedbg"] name = "drive-abci" path = "src/main.rs" - [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/packages/rs-drive-abci/src/execution/check_tx/v0/mod.rs b/packages/rs-drive-abci/src/execution/check_tx/v0/mod.rs index 06292c23421..e1e1a3adc5e 100644 --- a/packages/rs-drive-abci/src/execution/check_tx/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/check_tx/v0/mod.rs @@ -630,7 
+630,7 @@ mod tests { ) .expect("expected to process state transition"); - assert_eq!(processing_result.aggregated_fees().processing_fee, 2484410); + assert_eq!(processing_result.aggregated_fees().processing_fee, 2488010); let check_result = platform .check_tx( @@ -1141,7 +1141,7 @@ mod tests { // The processing fees should be twice as much as a fee multiplier of 0, // since a fee multiplier of 100 means 100% more of 1 (gives 2) - assert_eq!(processing_result.aggregated_fees().processing_fee, 4968820); + assert_eq!(processing_result.aggregated_fees().processing_fee, 4976020); let check_result = platform .check_tx( @@ -1613,7 +1613,7 @@ mod tests { ) .expect("expected to process state transition"); - assert_eq!(processing_result.aggregated_fees().processing_fee, 2484410); + assert_eq!(processing_result.aggregated_fees().processing_fee, 2488010); platform .drive @@ -1699,7 +1699,7 @@ mod tests { assert_eq!( update_processing_result.aggregated_fees().processing_fee, - 2496910 + 2502650 ); let check_result = platform @@ -2069,7 +2069,7 @@ mod tests { ) .expect("expected to process state transition"); - assert_eq!(processing_result.aggregated_fees().processing_fee, 2484410); + assert_eq!(processing_result.aggregated_fees().processing_fee, 2488010); platform .drive diff --git a/packages/rs-drive-abci/src/execution/platform_events/block_processing_end_events/process_block_fees_and_validate_sum_trees/v0/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/block_processing_end_events/process_block_fees_and_validate_sum_trees/v0/mod.rs index a3fa6719562..6118ae984cc 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/block_processing_end_events/process_block_fees_and_validate_sum_trees/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/block_processing_end_events/process_block_fees_and_validate_sum_trees/v0/mod.rs @@ -110,6 +110,10 @@ impl Platform { epoch_info.current_epoch_index(), cached_current_epoch_start_block_height, 
cached_current_epoch_start_block_core_height, + storage_fee_distribution_outcome + .as_ref() + .map(|s| s.total_distributed_storage_fees) + .unwrap_or_default(), transaction, &mut batch, platform_version, @@ -181,7 +185,7 @@ impl Platform { credits_verified, credits_verified .total_in_trees() - .unwrap() + .expect("we already checked that there was no overflow in credits_verified.ok()") .abs_diff(credits_verified.total_credits_in_platform) )), )); diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_inwards_distribution/add_distribute_storage_fee_to_epochs_operations/v0/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_inwards_distribution/add_distribute_storage_fee_to_epochs_operations/v0/mod.rs index 2773ae01ac2..8d9eeb54743 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_inwards_distribution/add_distribute_storage_fee_to_epochs_operations/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_inwards_distribution/add_distribute_storage_fee_to_epochs_operations/v0/mod.rs @@ -64,6 +64,7 @@ impl Platform { Ok( storage_fee_distribution_outcome::v0::StorageFeeDistributionOutcome { + total_distributed_storage_fees: storage_distribution_fees, leftovers, refunded_epochs_count, }, diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/mod.rs index 7c90d35c61e..a25fcb44e37 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/mod.rs +++ 
b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/mod.rs @@ -1,7 +1,9 @@ mod v0; +mod v1; use crate::error::execution::ExecutionError; use crate::error::Error; +use dpp::fee::Credits; use crate::execution::types::proposer_payouts::v0::ProposersPayouts; use crate::platform_types::platform::Platform; @@ -34,6 +36,7 @@ impl Platform { current_epoch_index: u16, cached_current_epoch_start_block_height: Option, cached_current_epoch_start_block_core_height: Option, + total_distributed_storage_fees: Credits, transaction: &Transaction, batch: &mut Vec, platform_version: &PlatformVersion, @@ -52,6 +55,15 @@ impl Platform { batch, platform_version, ), + 1 => self.add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations_v1( + current_epoch_index, + cached_current_epoch_start_block_height, + cached_current_epoch_start_block_core_height, + total_distributed_storage_fees, + transaction, + batch, + platform_version, + ), version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { method: "add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations" .to_string(), diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v0/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v0/mod.rs index 47f1f408728..0864f19a1fb 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v0/mod.rs @@ -76,13 +76,15 @@ impl Platform { let 
unpaid_epoch = unpaid_epoch.into(); - let proposers_paid_count = self.add_epoch_pool_to_proposers_payout_operations( - &unpaid_epoch, - core_block_rewards, - transaction, - batch, - platform_version, - )?; + let proposers_paid_count = self + .add_epoch_pool_to_proposers_payout_operations( + &unpaid_epoch, + core_block_rewards, + transaction, + batch, + platform_version, + )? + .1; let mut inner_batch = GroveDbOpBatch::new(); @@ -98,12 +100,12 @@ impl Platform { // We paid to all epoch proposers last block. Since proposers paid count // was equal to proposers limit, we paid to 0 proposers this block - if proposers_paid_count == 0 { + if proposers_paid_count.len() == 0 { return Ok(None); } Ok(Some(proposer_payouts::v0::ProposersPayouts { - proposers_paid_count, + proposers_paid_count: proposers_paid_count.len() as u16, paid_epoch_index: unpaid_epoch.epoch_index(), })) } diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v1/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v1/mod.rs new file mode 100644 index 00000000000..b8c3f70ecd8 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations/v1/mod.rs @@ -0,0 +1,307 @@ +use crate::error::Error; +use crate::execution::types::proposer_payouts; +use crate::platform_types::platform::Platform; +use dpp::block::epoch::Epoch; +use dpp::block::finalized_epoch_info::v0::FinalizedEpochInfoV0; +use dpp::block::finalized_epoch_info::FinalizedEpochInfo; +use dpp::core_subsidy::epoch_core_reward_credits_for_distribution::epoch_core_reward_credits_for_distribution; +use dpp::core_subsidy::NetworkCoreSubsidy; +use dpp::fee::Credits; +use dpp::version::PlatformVersion; +use 
drive::drive::credit_pools::epochs::operations_factory::EpochOperations; +use drive::drive::credit_pools::operations::update_unpaid_epoch_index_operation; +use drive::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; +use drive::util::batch::{DriveOperation, GroveDbOpBatch, SystemOperationType}; + +use crate::execution::types::unpaid_epoch::v0::{UnpaidEpochV0Getters, UnpaidEpochV0Methods}; + +use drive::grovedb::Transaction; + +impl Platform { + /// Adds operations to the op batch which distribute fees + /// from the oldest unpaid epoch pool to proposers. + /// + /// Returns `ProposersPayouts` if there are any. + pub(super) fn add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations_v1( + &self, + current_epoch_index: u16, + cached_current_epoch_start_block_height: Option, + cached_current_epoch_start_block_core_height: Option, + total_distributed_storage_fees: Credits, + transaction: &Transaction, + batch: &mut Vec, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let unpaid_epoch = self.find_oldest_epoch_needing_payment( + current_epoch_index, + cached_current_epoch_start_block_height, + cached_current_epoch_start_block_core_height, + Some(transaction), + platform_version, + )?; + + let Some(unpaid_epoch) = unpaid_epoch else { + return Ok(None); + }; + + let start_block_core_height = if unpaid_epoch.epoch_index == 0 { + //On epoch 0 we need to use the activation height instead of the start of the block + self.drive + .fetch_genesis_core_height(Some(transaction), platform_version)? 
+ } else { + // The unpaid epochs start block has had its credits distributed, so we must do a + 1 + // But only if we are not in the first epoch + unpaid_epoch.start_block_core_height + 1 + }; + + // Calculate core block reward for the unpaid epoch + let core_block_rewards = epoch_core_reward_credits_for_distribution( + start_block_core_height, + unpaid_epoch.next_epoch_start_block_core_height, + self.config.network.core_subsidy_halving_interval(), + platform_version, + )?; + + // We must add to the system credits the epoch core block rewards + // On the Core side we move block rewards every block to asset lock pool + batch.push(DriveOperation::SystemOperation( + SystemOperationType::AddToSystemCredits { + amount: core_block_rewards, + }, + )); + + tracing::info!( + "Core block rewards for epoch {} from height {} to height {} are {}", + unpaid_epoch.epoch_index, + unpaid_epoch.start_block_core_height, + unpaid_epoch.next_epoch_start_block_core_height, + core_block_rewards + ); + + let unpaid_epoch = unpaid_epoch.into(); + + let (storage_and_processing_credits, block_proposers) = self + .add_epoch_pool_to_proposers_payout_operations( + &unpaid_epoch, + core_block_rewards, + transaction, + batch, + platform_version, + )?; + + let mut inner_batch = GroveDbOpBatch::new(); + + let unpaid_epoch_tree = Epoch::new(unpaid_epoch.epoch_index())?; + + unpaid_epoch_tree.add_mark_as_paid_operations(&mut inner_batch); + + inner_batch.push(update_unpaid_epoch_index_operation( + unpaid_epoch.next_unpaid_epoch_index(), + )); + + let proposers_paid_count = block_proposers.len() as u16; + + let finalized_epoch_info: FinalizedEpochInfo = FinalizedEpochInfoV0 { + first_block_time: unpaid_epoch.epoch_start_time(), + first_block_height: unpaid_epoch.start_block_height(), + total_blocks_in_epoch: unpaid_epoch.block_count()?, + first_core_block_height: start_block_core_height, + next_epoch_start_core_block_height: unpaid_epoch.next_epoch_start_block_core_height(), + total_processing_fees: 
storage_and_processing_credits.processing_pool_credits, + total_distributed_storage_fees: storage_and_processing_credits.storage_pool_credits, + total_created_storage_fees: total_distributed_storage_fees, + core_block_rewards, + block_proposers, + fee_multiplier_permille: unpaid_epoch.fee_multiplier(), + protocol_version: unpaid_epoch.protocol_version(), + } + .into(); + + let add_epoch_final_info_operation = self.drive.add_epoch_final_info_operation( + &unpaid_epoch_tree, + finalized_epoch_info, + platform_version, + )?; + + inner_batch.push(add_epoch_final_info_operation); + + batch.push(DriveOperation::GroveDBOpBatch(inner_batch)); + + // We paid to all epoch proposers last block. Since proposers paid count + // was equal to proposers limit, we paid to 0 proposers this block + if proposers_paid_count == 0 { + return Ok(None); + } + + Ok(Some(proposer_payouts::v0::ProposersPayouts { + proposers_paid_count, + paid_epoch_index: unpaid_epoch.epoch_index(), + })) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use dpp::block::block_info::BlockInfo; + + use drive::util::test_helpers::test_utils::identities::create_test_masternode_identities_and_add_them_as_epoch_block_proposers; + + use crate::test::helpers::setup::TestPlatformBuilder; + + use crate::execution::types::proposer_payouts::v0::ProposersPayouts; + use drive::error::Error as DriveError; + use drive::grovedb; + + #[test] + fn test_nothing_to_distribute_if_there_is_no_epochs_needing_payment() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure(); + let transaction = platform.drive.grove.start_transaction(); + + let current_epoch_index = 0; + + let mut batch = vec![]; + + let proposers_payouts = platform + .add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations_v0( + current_epoch_index, + None, + None, + &transaction, + &mut batch, + platform_version, + ) + .expect("should distribute 
fees"); + + assert!(proposers_payouts.is_none()); + } + + #[test] + fn test_mark_epoch_as_paid_and_update_next_update_epoch_index_if_all_proposers_paid() { + let platform_version = PlatformVersion::latest(); + let platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_genesis_state_with_activation_info(0, 1); + let transaction = platform.drive.grove.start_transaction(); + + // Create masternode reward shares contract + platform.create_mn_shares_contract(Some(&transaction), platform_version); + + let proposers_count = 150; + let processing_fees = 100000000; + let storage_fees = 10000000; + + let unpaid_epoch = Epoch::new(0).unwrap(); + let current_epoch = Epoch::new(1).unwrap(); + + let mut batch = GroveDbOpBatch::new(); + + unpaid_epoch.add_init_current_operations( + platform_version + .fee_version + .uses_version_fee_multiplier_permille + .expect("expected a fee multiplier"), + 1, + 1, + 1, + platform_version.protocol_version, + &mut batch, + ); + + batch.push( + unpaid_epoch + .update_processing_fee_pool_operation(processing_fees) + .expect("should add operation"), + ); + + batch.push( + unpaid_epoch + .update_storage_fee_pool_operation(storage_fees) + .expect("should add operation"), + ); + + current_epoch.add_init_current_operations( + platform_version + .fee_version + .uses_version_fee_multiplier_permille + .expect("expected a fee multiplier"), + proposers_count as u64 + 1, + 3, + 2, + platform_version.protocol_version, + &mut batch, + ); + + platform + .drive + .grove_apply_batch(batch, false, Some(&transaction), &platform_version.drive) + .expect("should apply batch"); + + let proposers = create_test_masternode_identities_and_add_them_as_epoch_block_proposers( + &platform.drive, + &unpaid_epoch, + proposers_count, + Some(65), //random number + Some(&transaction), + platform_version, + ); + + let mut batch = vec![]; + + let proposer_payouts = platform + .add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations_v0( + 
current_epoch.index, + None, + None, + &transaction, + &mut batch, + platform_version, + ) + .expect("should distribute fees"); + + platform + .drive + .apply_drive_operations( + batch, + true, + &BlockInfo::default(), + Some(&transaction), + platform_version, + None, + ) + .expect("should apply batch"); + + assert!(matches!( + proposer_payouts, + Some(ProposersPayouts { + proposers_paid_count: p, + paid_epoch_index: 0, + }) if p == proposers_count + )); + + let next_unpaid_epoch_index = platform + .drive + .get_unpaid_epoch_index(Some(&transaction), platform_version) + .expect("should get unpaid epoch index"); + + assert_eq!(next_unpaid_epoch_index, current_epoch.index); + + // check we've removed proposers tree + let result = platform.drive.get_epochs_proposer_block_count( + &unpaid_epoch, + &proposers[0], + Some(&transaction), + platform_version, + ); + + assert!(matches!( + result, + Err(DriveError::GroveDB( + grovedb::Error::PathParentLayerNotFound(_) + )) + )); + } +} diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/mod.rs index 99359266920..4fd3827a6a6 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/mod.rs @@ -2,9 +2,10 @@ use crate::error::execution::ExecutionError; use crate::error::Error; use crate::execution::types::unpaid_epoch::UnpaidEpoch; use crate::platform_types::platform::Platform; +use dpp::block::pool_credits::StorageAndProcessingPoolCredits; use dpp::fee::Credits; - +use dpp::identifier::Identifier; use dpp::version::PlatformVersion; use drive::grovedb::Transaction; use 
drive::util::batch::DriveOperation; @@ -37,7 +38,7 @@ impl Platform { transaction: &Transaction, batch: &mut Vec, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result<(StorageAndProcessingPoolCredits, Vec<(Identifier, u64)>), Error> { match platform_version .drive_abci .methods diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/v0/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/v0/mod.rs index 14de0e6df7e..7d095a8bf05 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/add_epoch_pool_to_proposers_payout_operations/v0/mod.rs @@ -6,10 +6,11 @@ use crate::execution::types::unpaid_epoch::UnpaidEpoch; use crate::platform_types::platform::Platform; use dpp::block::block_info::BlockInfo; use dpp::block::epoch::Epoch; +use dpp::block::pool_credits::StorageAndProcessingPoolCredits; use dpp::document::DocumentV0Getters; use dpp::fee::Credits; use dpp::platform_value::btreemap_extensions::BTreeValueMapHelper; - +use dpp::platform_value::Identifier; use dpp::version::PlatformVersion; use dpp::ProtocolError; use drive::query::proposer_block_count_query::ProposerQueryType; @@ -31,7 +32,7 @@ impl Platform { transaction: &Transaction, batch: &mut Vec, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result<(StorageAndProcessingPoolCredits, Vec<(Identifier, u64)>), Error> { let mut drive_operations = vec![]; let unpaid_epoch_tree = Epoch::new(unpaid_epoch.epoch_index())?; @@ -45,6 +46,7 @@ impl Platform { .map_err(Error::Drive)?; let total_payouts = storage_and_processing_fees + .total_credits .checked_add(core_block_rewards) .ok_or_else(|| { 
Error::Execution(ExecutionError::Overflow("overflow when adding reward fees")) @@ -70,19 +72,19 @@ impl Platform { tracing::trace!( unpaid_block_count = unpaid_epoch_block_count, unpaid_epoch_index = unpaid_epoch.epoch_index(), - fees = storage_and_processing_fees, core_block_rewards, total_payouts, - "Pay {total_payouts} credits to {proposers_len} proposers for {} proposed blocks in epoch {}", + "Pay {total_payouts} credits to {proposers_len} proposers for {} proposed blocks in epoch {}, decomposed as {}", unpaid_epoch_block_count, unpaid_epoch.epoch_index(), + storage_and_processing_fees ); - for (i, (proposer_tx_hash, proposed_block_count)) in proposers.into_iter().enumerate() { + for (i, (proposer_tx_hash, proposed_block_count)) in proposers.iter().enumerate() { let i = i as u16; let total_masternode_payout = total_payouts - .checked_mul(proposed_block_count) + .checked_mul(*proposed_block_count) .and_then(|r| r.checked_div(unpaid_epoch_block_count)) .ok_or(Error::Execution(ExecutionError::Overflow( "overflow when getting masternode reward division", @@ -91,7 +93,7 @@ impl Platform { let mut masternode_payout_leftover = total_masternode_payout; let documents = self.fetch_reward_shares_list_for_masternode( - &proposer_tx_hash, + proposer_tx_hash.as_bytes(), Some(transaction), platform_version, )?; @@ -148,14 +150,8 @@ impl Platform { masternode_payout_leftover }; - let proposer = proposer_tx_hash.as_slice().try_into().map_err(|_| { - Error::Execution(ExecutionError::DriveIncoherence( - "proposer_tx_hash is not 32 bytes long", - )) - })?; - drive_operations.push(IdentityOperation(AddToIdentityBalance { - identity_id: proposer, + identity_id: proposer_tx_hash.to_buffer(), added_balance: proposer_payout, })); } @@ -169,7 +165,7 @@ impl Platform { batch.push(DriveOperation::GroveDBOpBatch(operations)); - Ok(proposers_len) + Ok((storage_and_processing_fees, proposers)) } } @@ -290,6 +286,9 @@ mod tests { start_block_core_height: 1, next_unpaid_epoch_index: 0, 
next_epoch_start_block_core_height: 1, + epoch_start_time: 0, + protocol_version: platform_version.protocol_version, + fee_multiplier: 0, }; let proposers_paid_count = platform @@ -300,7 +299,8 @@ mod tests { &mut batch, platform_version, ) - .expect("should distribute fees"); + .expect("should distribute fees") + .1; platform .drive @@ -314,7 +314,7 @@ mod tests { ) .expect("should apply batch"); - assert_eq!(proposers_paid_count, 10); + assert_eq!(proposers_paid_count.len(), 10); // check we paid 500 to every mn identity let paid_mn_identities_balances = platform diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/mod.rs index 7a3f8d39824..c038e466997 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/mod.rs @@ -27,7 +27,7 @@ impl Platform { /// * `Result, Error>` - A list of `Document` if successful. Otherwise, an `Error`. 
pub fn fetch_reward_shares_list_for_masternode( &self, - masternode_owner_id: &[u8], + masternode_owner_id: &[u8; 32], transaction: TransactionArg, platform_version: &PlatformVersion, ) -> Result, Error> { diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/v0/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/v0/mod.rs index c2174873406..a7999958dc6 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/fetch_reward_shares_list_for_masternode/v0/mod.rs @@ -28,7 +28,7 @@ impl Platform { /// A function to retrieve a list of the masternode reward shares documents for a list of masternode IDs. pub(super) fn fetch_reward_shares_list_for_masternode_v0( &self, - masternode_owner_id: &[u8], + masternode_owner_id: &[u8; 32], transaction: TransactionArg, platform_version: &PlatformVersion, ) -> Result, Error> { diff --git a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/find_oldest_epoch_needing_payment/v0/mod.rs b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/find_oldest_epoch_needing_payment/v0/mod.rs index 56278173b73..9ad2e8a604c 100644 --- a/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/find_oldest_epoch_needing_payment/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/platform_events/fee_pool_outwards_distribution/find_oldest_epoch_needing_payment/v0/mod.rs @@ -6,7 +6,8 @@ use dpp::block::epoch::Epoch; use dpp::fee::epoch::GENESIS_EPOCH_INDEX; use dpp::version::PlatformVersion; use drive::drive::credit_pools::epochs::start_block::StartBlockInfo; - +use drive::error; +use drive::error::drive::DriveError; use 
drive::grovedb::TransactionArg; impl Platform { @@ -36,6 +37,16 @@ impl Platform { let unpaid_epoch = Epoch::new(unpaid_epoch_index)?; + let epoch_start_time = self.drive.get_expected_epoch_start_time( + &unpaid_epoch, + transaction, + platform_version, + )?; + + let epoch_protocol_version = + self.drive + .get_epoch_protocol_version(&unpaid_epoch, transaction, platform_version)?; + let start_block_height = self.drive.get_epoch_start_block_height( &unpaid_epoch, transaction, @@ -48,6 +59,10 @@ impl Platform { platform_version, )?; + let fee_multiplier = + self.drive + .get_epoch_fee_multiplier(&unpaid_epoch, transaction, platform_version)?; + let next_unpaid_epoch_info = if unpaid_epoch.index == current_epoch_index - 1 { // Use cached or committed block height for previous epoch let start_block_height = match cached_current_epoch_start_block_height { @@ -117,10 +132,13 @@ impl Platform { Ok(Some(unpaid_epoch::v0::UnpaidEpochV0 { epoch_index: unpaid_epoch_index, next_unpaid_epoch_index: next_unpaid_epoch_info.epoch_index, + epoch_start_time, start_block_height, next_epoch_start_block_height: next_unpaid_epoch_info.start_block_height, start_block_core_height, next_epoch_start_block_core_height: next_unpaid_epoch_info.start_block_core_height, + protocol_version: epoch_protocol_version, + fee_multiplier, })) } } @@ -132,6 +150,7 @@ mod tests { mod find_oldest_epoch_needing_payment { use crate::execution::types::unpaid_epoch::v0::UnpaidEpochV0Methods; use crate::test::helpers::setup::TestPlatformBuilder; + use assert_matches::assert_matches; use drive::drive::credit_pools::epochs::operations_factory::EpochOperations; use drive::drive::credit_pools::operations::update_unpaid_epoch_index_operation; use drive::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; @@ -207,14 +226,17 @@ mod tests { .set_initial_state_structure(); let transaction = platform.drive.grove.start_transaction(); - let epoch_0_tree = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); + let epoch_0 = 
Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); let current_epoch_index = GENESIS_EPOCH_INDEX + 1; let mut batch = GroveDbOpBatch::new(); - batch.push(epoch_0_tree.update_start_block_height_operation(1)); - batch.push(epoch_0_tree.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_block_height_operation(1)); + batch.push(epoch_0.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_time_operation(1)); + batch.push(epoch_0.update_protocol_version_operation(1)); + batch.push(epoch_0.update_fee_multiplier_operation(1000)); platform .drive @@ -261,18 +283,24 @@ mod tests { .set_initial_state_structure(); let transaction = platform.drive.grove.start_transaction(); - let epoch_0_tree = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); + let epoch_0 = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); let current_epoch_index = GENESIS_EPOCH_INDEX + 1; - let epoch_1_tree = Epoch::new(current_epoch_index).unwrap(); + let epoch_1 = Epoch::new(current_epoch_index).unwrap(); let mut batch = GroveDbOpBatch::new(); - batch.push(epoch_0_tree.update_start_block_height_operation(1)); - batch.push(epoch_0_tree.update_start_block_core_height_operation(1)); - batch.push(epoch_1_tree.update_start_block_height_operation(2)); - batch.push(epoch_1_tree.update_start_block_core_height_operation(2)); + batch.push(epoch_0.update_start_block_height_operation(1)); + batch.push(epoch_0.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_time_operation(1)); + batch.push(epoch_0.update_protocol_version_operation(1)); + batch.push(epoch_0.update_fee_multiplier_operation(1000)); + batch.push(epoch_1.update_start_block_height_operation(2)); + batch.push(epoch_1.update_start_block_core_height_operation(2)); + batch.push(epoch_1.update_start_time_operation(2)); + batch.push(epoch_1.update_protocol_version_operation(2)); + batch.push(epoch_1.update_fee_multiplier_operation(1000)); platform .drive @@ -314,21 +342,30 @@ mod tests { 
.set_initial_state_structure(); let transaction = platform.drive.grove.start_transaction(); - let epoch_0_tree = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); - let epoch_1_tree = Epoch::new(GENESIS_EPOCH_INDEX + 1).unwrap(); + let epoch_0 = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); + let epoch_1 = Epoch::new(GENESIS_EPOCH_INDEX + 1).unwrap(); let current_epoch_index = GENESIS_EPOCH_INDEX + 2; - let epoch_2_tree = Epoch::new(current_epoch_index).unwrap(); + let epoch_2 = Epoch::new(current_epoch_index).unwrap(); let mut batch = GroveDbOpBatch::new(); - batch.push(epoch_0_tree.update_start_block_height_operation(1)); - batch.push(epoch_0_tree.update_start_block_core_height_operation(1)); - batch.push(epoch_1_tree.update_start_block_height_operation(2)); - batch.push(epoch_1_tree.update_start_block_core_height_operation(2)); - batch.push(epoch_2_tree.update_start_block_height_operation(3)); - batch.push(epoch_2_tree.update_start_block_core_height_operation(3)); + batch.push(epoch_0.update_start_block_height_operation(1)); + batch.push(epoch_0.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_time_operation(1)); + batch.push(epoch_0.update_protocol_version_operation(1)); + batch.push(epoch_0.update_fee_multiplier_operation(1000)); + batch.push(epoch_1.update_start_block_height_operation(2)); + batch.push(epoch_1.update_start_block_core_height_operation(2)); + batch.push(epoch_1.update_start_time_operation(2)); + batch.push(epoch_1.update_protocol_version_operation(2)); + batch.push(epoch_1.update_fee_multiplier_operation(1000)); + batch.push(epoch_2.update_start_block_height_operation(3)); + batch.push(epoch_2.update_start_block_core_height_operation(3)); + batch.push(epoch_2.update_start_time_operation(3)); + batch.push(epoch_2.update_protocol_version_operation(3)); + batch.push(epoch_2.update_fee_multiplier_operation(1000)); platform .drive @@ -370,14 +407,17 @@ mod tests { .set_initial_state_structure(); let transaction = 
platform.drive.grove.start_transaction(); - let epoch_0_tree = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); + let epoch_0 = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); let current_epoch_index = GENESIS_EPOCH_INDEX + 2; let mut batch = GroveDbOpBatch::new(); - batch.push(epoch_0_tree.update_start_block_height_operation(1)); - batch.push(epoch_0_tree.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_block_height_operation(1)); + batch.push(epoch_0.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_time_operation(1)); + batch.push(epoch_0.update_protocol_version_operation(1)); + batch.push(epoch_0.update_fee_multiplier_operation(1000)); platform .drive @@ -423,14 +463,17 @@ mod tests { .set_initial_state_structure(); let transaction = platform.drive.grove.start_transaction(); - let epoch_0_tree = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); + let epoch_0 = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); let current_epoch_index = GENESIS_EPOCH_INDEX + 2; let mut batch = GroveDbOpBatch::new(); - batch.push(epoch_0_tree.update_start_block_height_operation(1)); - batch.push(epoch_0_tree.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_block_height_operation(1)); + batch.push(epoch_0.update_start_block_core_height_operation(1)); + batch.push(epoch_0.update_start_time_operation(1)); + batch.push(epoch_0.update_protocol_version_operation(1)); + batch.push(epoch_0.update_fee_multiplier_operation(1000)); platform .drive @@ -445,10 +488,10 @@ mod tests { platform_version, ); - assert!(matches!( + assert_matches!( unpaid_epoch, Err(Error::Execution(ExecutionError::CorruptedCodeExecution(_))) - )); + ); } } } diff --git a/packages/rs-drive-abci/src/execution/types/storage_fee_distribution_outcome/v0/mod.rs b/packages/rs-drive-abci/src/execution/types/storage_fee_distribution_outcome/v0/mod.rs index 699126b93e4..1c6bcd1e2ca 100644 --- 
a/packages/rs-drive-abci/src/execution/types/storage_fee_distribution_outcome/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/types/storage_fee_distribution_outcome/v0/mod.rs @@ -3,6 +3,8 @@ use dpp::fee::Credits; //todo: make this non versioned /// Result of storage fee distribution pub struct StorageFeeDistributionOutcome { + /// The total distributed storage fees of the epoch + pub total_distributed_storage_fees: Credits, /// Leftovers in result of divisions and rounding after storage fee distribution to epochs pub leftovers: Credits, /// A number of epochs which had refunded diff --git a/packages/rs-drive-abci/src/execution/types/unpaid_epoch/mod.rs b/packages/rs-drive-abci/src/execution/types/unpaid_epoch/mod.rs index be55eacd384..c308239e8d8 100644 --- a/packages/rs-drive-abci/src/execution/types/unpaid_epoch/mod.rs +++ b/packages/rs-drive-abci/src/execution/types/unpaid_epoch/mod.rs @@ -3,6 +3,9 @@ use crate::execution::types::unpaid_epoch::v0::{ }; use derive_more::From; use dpp::block::epoch::EpochIndex; +use dpp::identity::TimestampMillis; +use dpp::prelude::FeeMultiplier; +use dpp::util::deserializer::ProtocolVersion; use drive::error::Error; pub mod v0; @@ -28,6 +31,12 @@ impl UnpaidEpochV0Getters for UnpaidEpoch { } } + fn epoch_start_time(&self) -> TimestampMillis { + match self { + UnpaidEpoch::V0(v0) => v0.epoch_start_time(), + } + } + fn next_unpaid_epoch_index(&self) -> EpochIndex { match self { UnpaidEpoch::V0(v0) => v0.next_unpaid_epoch_index(), @@ -57,6 +66,18 @@ impl UnpaidEpochV0Getters for UnpaidEpoch { UnpaidEpoch::V0(v0) => v0.next_epoch_start_block_core_height(), } } + + fn protocol_version(&self) -> ProtocolVersion { + match self { + UnpaidEpoch::V0(v0) => v0.protocol_version(), + } + } + + fn fee_multiplier(&self) -> FeeMultiplier { + match self { + UnpaidEpoch::V0(v0) => v0.fee_multiplier(), + } + } } impl UnpaidEpochV0Setters for UnpaidEpoch { @@ -66,6 +87,12 @@ impl UnpaidEpochV0Setters for UnpaidEpoch { } } + fn 
set_epoch_start_time(&mut self, epoch_start_time: TimestampMillis) { + match self { + UnpaidEpoch::V0(v0) => v0.set_epoch_start_time(epoch_start_time), + } + } + fn set_next_unpaid_epoch_index(&mut self, next_unpaid_epoch_index: EpochIndex) { match self { UnpaidEpoch::V0(v0) => v0.set_next_unpaid_epoch_index(next_unpaid_epoch_index), @@ -99,4 +126,16 @@ impl UnpaidEpochV0Setters for UnpaidEpoch { } } } + + fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) { + match self { + UnpaidEpoch::V0(v0) => v0.set_protocol_version(protocol_version), + } + } + + fn set_fee_multiplier(&mut self, fee_multiplier: FeeMultiplier) { + match self { + UnpaidEpoch::V0(v0) => v0.set_fee_multiplier(fee_multiplier), + } + } } diff --git a/packages/rs-drive-abci/src/execution/types/unpaid_epoch/v0/mod.rs b/packages/rs-drive-abci/src/execution/types/unpaid_epoch/v0/mod.rs index fa9180b38f2..a28ce22f0d2 100644 --- a/packages/rs-drive-abci/src/execution/types/unpaid_epoch/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/types/unpaid_epoch/v0/mod.rs @@ -1,4 +1,7 @@ use dpp::block::epoch::EpochIndex; +use dpp::identity::TimestampMillis; +use dpp::prelude::{BlockHeight, CoreBlockHeight, FeeMultiplier}; +use dpp::util::deserializer::ProtocolVersion; use drive::error; use drive::error::fee::FeeError; @@ -9,14 +12,21 @@ pub struct UnpaidEpochV0 { pub epoch_index: EpochIndex, /// Index of the next unpaid epoch pub next_unpaid_epoch_index: EpochIndex, + /// Start time of the epoch + /// Also the time that the first block of the epoch was created + pub epoch_start_time: TimestampMillis, /// Block height of the first block in the epoch - pub start_block_height: u64, + pub start_block_height: BlockHeight, /// Block height of the first block in next epoch - pub next_epoch_start_block_height: u64, + pub next_epoch_start_block_height: BlockHeight, /// Block height of the first block in the epoch - pub start_block_core_height: u32, + pub start_block_core_height: CoreBlockHeight, /// 
Block height of the first block in next epoch - pub next_epoch_start_block_core_height: u32, + pub next_epoch_start_block_core_height: CoreBlockHeight, + /// Protocol version + pub protocol_version: ProtocolVersion, + /// Fee multiplier + pub fee_multiplier: FeeMultiplier, } pub trait UnpaidEpochV0Methods { @@ -39,32 +49,51 @@ impl UnpaidEpochV0Methods for UnpaidEpochV0 { pub trait UnpaidEpochV0Getters { /// Get the index of the current epoch fn epoch_index(&self) -> EpochIndex; + /// Get the start time of the epoch + fn epoch_start_time(&self) -> TimestampMillis; /// Get the index of the next unpaid epoch fn next_unpaid_epoch_index(&self) -> EpochIndex; /// Get the block height of the first block in the epoch - fn start_block_height(&self) -> u64; + fn start_block_height(&self) -> BlockHeight; /// Get the block height of the first block in the next epoch - fn next_epoch_start_block_height(&self) -> u64; + fn next_epoch_start_block_height(&self) -> BlockHeight; /// Get the block height of the first block in the epoch in the core chain - fn start_block_core_height(&self) -> u32; + fn start_block_core_height(&self) -> CoreBlockHeight; /// Get the block height of the first block in the next epoch in the core chain - fn next_epoch_start_block_core_height(&self) -> u32; + fn next_epoch_start_block_core_height(&self) -> CoreBlockHeight; + + /// Get the protocol version that the epoch used + fn protocol_version(&self) -> ProtocolVersion; + + /// Get the fee multiplier that the epoch used + fn fee_multiplier(&self) -> FeeMultiplier; } /// Trait that defines setter methods for `UnpaidEpochV0` pub trait UnpaidEpochV0Setters { /// Set the index of the current epoch fn set_epoch_index(&mut self, epoch_index: EpochIndex); + /// Set the start time of the epoch + fn set_epoch_start_time(&mut self, epoch_start_time: TimestampMillis); /// Set the index of the next unpaid epoch fn set_next_unpaid_epoch_index(&mut self, next_unpaid_epoch_index: EpochIndex); /// Set the block height 
of the first block in the epoch - fn set_start_block_height(&mut self, start_block_height: u64); + fn set_start_block_height(&mut self, start_block_height: BlockHeight); /// Set the block height of the first block in the next epoch - fn set_next_epoch_start_block_height(&mut self, next_epoch_start_block_height: u64); + fn set_next_epoch_start_block_height(&mut self, next_epoch_start_block_height: BlockHeight); /// Set the block height of the first block in the epoch in the core chain - fn set_start_block_core_height(&mut self, start_block_core_height: u32); + fn set_start_block_core_height(&mut self, start_block_core_height: CoreBlockHeight); /// Set the block height of the first block in the next epoch in the core chain - fn set_next_epoch_start_block_core_height(&mut self, next_epoch_start_block_core_height: u32); + fn set_next_epoch_start_block_core_height( + &mut self, + next_epoch_start_block_core_height: CoreBlockHeight, + ); + + /// Set the protocol version that the epoch used + fn set_protocol_version(&mut self, protocol_version: ProtocolVersion); + + /// Set the fee multiplier that the epoch used + fn set_fee_multiplier(&mut self, fee_multiplier: FeeMultiplier); } impl UnpaidEpochV0Getters for UnpaidEpochV0 { @@ -72,25 +101,37 @@ impl UnpaidEpochV0Getters for UnpaidEpochV0 { self.epoch_index } + fn epoch_start_time(&self) -> TimestampMillis { + self.epoch_start_time + } + fn next_unpaid_epoch_index(&self) -> EpochIndex { self.next_unpaid_epoch_index } - fn start_block_height(&self) -> u64 { + fn start_block_height(&self) -> BlockHeight { self.start_block_height } - fn next_epoch_start_block_height(&self) -> u64 { + fn next_epoch_start_block_height(&self) -> BlockHeight { self.next_epoch_start_block_height } - fn start_block_core_height(&self) -> u32 { + fn start_block_core_height(&self) -> CoreBlockHeight { self.start_block_core_height } - fn next_epoch_start_block_core_height(&self) -> u32 { + fn next_epoch_start_block_core_height(&self) -> 
CoreBlockHeight { self.next_epoch_start_block_core_height } + + fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + fn fee_multiplier(&self) -> FeeMultiplier { + self.fee_multiplier + } } impl UnpaidEpochV0Setters for UnpaidEpochV0 { @@ -98,6 +139,10 @@ impl UnpaidEpochV0Setters for UnpaidEpochV0 { self.epoch_index = epoch_index; } + fn set_epoch_start_time(&mut self, epoch_start_time: TimestampMillis) { + self.epoch_start_time = epoch_start_time; + } + fn set_next_unpaid_epoch_index(&mut self, next_unpaid_epoch_index: EpochIndex) { self.next_unpaid_epoch_index = next_unpaid_epoch_index; } @@ -117,4 +162,12 @@ impl UnpaidEpochV0Setters for UnpaidEpochV0 { fn set_next_epoch_start_block_core_height(&mut self, next_epoch_start_block_core_height: u32) { self.next_epoch_start_block_core_height = next_epoch_start_block_core_height; } + + fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) { + self.protocol_version = protocol_version; + } + + fn set_fee_multiplier(&mut self, fee_multiplier: FeeMultiplier) { + self.fee_multiplier = fee_multiplier; + } } diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/advanced_structure_v0/mod.rs similarity index 100% rename from packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/structure_v0/mod.rs rename to packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/advanced_structure_v0/mod.rs diff --git 
a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/mod.rs index 5304f28180b..af90241f1da 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_create_transition_action/mod.rs @@ -10,12 +10,12 @@ use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::document::document_create_transition_action::state_v0::DocumentCreateTransitionActionStateValidationV0; use crate::execution::validation::state_transition::batch::action_validation::document::document_create_transition_action::state_v1::DocumentCreateTransitionActionStateValidationV1; -use crate::execution::validation::state_transition::batch::action_validation::document::document_create_transition_action::structure_v0::DocumentCreateTransitionActionStructureValidationV0; +use crate::execution::validation::state_transition::batch::action_validation::document::document_create_transition_action::advanced_structure_v0::DocumentCreateTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; +mod advanced_structure_v0; mod state_v0; mod state_v1; -mod structure_v0; pub trait DocumentCreateTransitionActionValidation { fn validate_structure( diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/structure_v0/mod.rs 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/advanced_structure_v0/mod.rs similarity index 100% rename from packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/structure_v0/mod.rs rename to packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/advanced_structure_v0/mod.rs diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/mod.rs index 4bc007486bc..cc1a6cfa0d1 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_delete_transition_action/mod.rs @@ -8,11 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::document::document_delete_transition_action::state_v0::DocumentDeleteTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::document::document_delete_transition_action::structure_v0::DocumentDeleteTransitionActionStructureValidationV0; +use 
crate::execution::validation::state_transition::batch::action_validation::document::document_delete_transition_action::advanced_structure_v0::DocumentDeleteTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; +mod advanced_structure_v0; mod state_v0; -mod structure_v0; pub trait DocumentDeleteTransitionActionValidation { fn validate_structure( diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/advanced_structure_v0/mod.rs similarity index 100% rename from packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/structure_v0/mod.rs rename to packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/advanced_structure_v0/mod.rs diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/mod.rs index 8b37d7ed409..ae8b5b4ff08 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_purchase_transition_action/mod.rs @@ -9,11 +9,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use 
crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::document::document_purchase_transition_action::state_v0::DocumentPurchaseTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::document::document_purchase_transition_action::structure_v0::DocumentPurchaseTransitionActionStructureValidationV0; +use crate::execution::validation::state_transition::batch::action_validation::document::document_purchase_transition_action::advanced_structure_v0::DocumentPurchaseTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; +mod advanced_structure_v0; mod state_v0; -mod structure_v0; pub trait DocumentPurchaseTransitionActionValidation { fn validate_structure( diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/advanced_structure_v0/mod.rs similarity index 100% rename from packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/structure_v0/mod.rs rename to packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/advanced_structure_v0/mod.rs diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/mod.rs index 6419ff58119..de32e19b21c 
100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_replace_transition_action/mod.rs @@ -9,11 +9,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::document::document_replace_transition_action::state_v0::DocumentReplaceTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::document::document_replace_transition_action::structure_v0::DocumentReplaceTransitionActionStructureValidationV0; +use crate::execution::validation::state_transition::batch::action_validation::document::document_replace_transition_action::advanced_structure_v0::DocumentReplaceTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; +mod advanced_structure_v0; mod state_v0; -mod structure_v0; pub trait DocumentReplaceTransitionActionValidation { fn validate_structure( diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/advanced_structure_v0/mod.rs similarity index 100% rename from packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/structure_v0/mod.rs rename to 
packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/advanced_structure_v0/mod.rs diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/mod.rs index d5df2e9b98c..b88eb510cb1 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_transfer_transition_action/mod.rs @@ -9,11 +9,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::document::document_transfer_transition_action::state_v0::DocumentTransferTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::document::document_transfer_transition_action::structure_v0::DocumentTransferTransitionActionStructureValidationV0; +use crate::execution::validation::state_transition::batch::action_validation::document::document_transfer_transition_action::advanced_structure_v0::DocumentTransferTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; +mod advanced_structure_v0; mod state_v0; -mod structure_v0; pub trait DocumentTransferTransitionActionValidation { fn validate_structure( diff --git 
a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/advanced_structure_v0/mod.rs similarity index 100% rename from packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/structure_v0/mod.rs rename to packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/advanced_structure_v0/mod.rs diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/mod.rs index 6b9dcdcfe74..773bf41d016 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/document/document_update_price_transition_action/mod.rs @@ -9,11 +9,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::document::document_update_price_transition_action::state_v0::DocumentUpdatePriceTransitionActionStateValidationV0; -use 
crate::execution::validation::state_transition::batch::action_validation::document::document_update_price_transition_action::structure_v0::DocumentUpdatePriceTransitionActionStructureValidationV0; +use crate::execution::validation::state_transition::batch::action_validation::document::document_update_price_transition_action::advanced_structure_v0::DocumentUpdatePriceTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; +mod advanced_structure_v0; mod state_v0; -mod structure_v0; pub trait DocumentUpdatePriceTransitionActionValidation { fn validate_structure( diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/mod.rs index 7060861681a..49646315c67 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/mod.rs @@ -1,5 +1,6 @@ pub(crate) mod token_base_transition_action; pub(crate) mod token_burn_transition_action; +pub(crate) mod token_claim_transition_action; pub(crate) mod token_config_update_transition_action; pub(crate) mod token_destroy_frozen_funds_transition_action; pub(crate) mod token_emergency_action_transition_action; diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/mod.rs index e92feab605f..422a6e6cb78 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/mod.rs +++ 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/mod.rs @@ -8,18 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_burn_transition_action::state_v0::TokenBurnTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_burn_transition_action::structure_v0::TokenBurnTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenBurnTransitionActionValidation { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -32,26 +25,6 @@ pub trait TokenBurnTransitionActionValidation { } impl TokenBurnTransitionActionValidation for TokenBurnTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_burn_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenBurnTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/structure_v0/mod.rs 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/structure_v0/mod.rs deleted file mode 100644 index 440d14ec869..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_burn_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_burn_transition_action::{TokenBurnTransitionAction, TokenBurnTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenBurnTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenBurnTransitionActionStructureValidationV0 for TokenBurnTransitionAction { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_claim_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_claim_transition_action/mod.rs new file mode 100644 index 00000000000..0e01c1fd235 --- /dev/null +++ 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_claim_transition_action/mod.rs @@ -0,0 +1,59 @@ +use dpp::block::block_info::BlockInfo; +use dpp::identifier::Identifier; +use dpp::validation::SimpleConsensusValidationResult; +use drive::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::TokenClaimTransitionAction; +use dpp::version::PlatformVersion; +use drive::grovedb::TransactionArg; +use crate::error::Error; +use crate::error::execution::ExecutionError; +use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; +use crate::execution::validation::state_transition::batch::action_validation::token::token_claim_transition_action::state_v0::TokenClaimTransitionActionStateValidationV0; +use crate::platform_types::platform::PlatformStateRef; + +mod state_v0; + +pub trait TokenClaimTransitionActionValidation { + fn validate_state( + &self, + platform: &PlatformStateRef, + owner_id: Identifier, + block_info: &BlockInfo, + execution_context: &mut StateTransitionExecutionContext, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result; +} + +impl TokenClaimTransitionActionValidation for TokenClaimTransitionAction { + fn validate_state( + &self, + platform: &PlatformStateRef, + owner_id: Identifier, + block_info: &BlockInfo, + execution_context: &mut StateTransitionExecutionContext, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive_abci + .validation_and_processing + .state_transitions + .batch_state_transition + .token_claim_transition_state_validation + { + 0 => self.validate_state_v0( + platform, + owner_id, + block_info, + execution_context, + transaction, + platform_version, + ), + version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { + method: 
"TokenClaimTransitionAction::validate_state".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_claim_transition_action/state_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_claim_transition_action/state_v0/mod.rs new file mode 100644 index 00000000000..6855a34f79c --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_claim_transition_action/state_v0/mod.rs @@ -0,0 +1,107 @@ +use dpp::block::block_info::BlockInfo; +use dpp::consensus::ConsensusError; +use dpp::consensus::state::state_error::StateError; +use dpp::consensus::state::token::TokenMintPastMaxSupplyError; +use dpp::data_contract::accessors::v1::DataContractV1Getters; +use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::prelude::Identifier; +use dpp::validation::SimpleConsensusValidationResult; +use drive::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::{TokenClaimTransitionAction, TokenClaimTransitionActionAccessorsV0}; +use dpp::version::PlatformVersion; +use drive::error::drive::DriveError; +use drive::query::TransactionArg; +use crate::error::Error; +use crate::execution::types::execution_operation::ValidationOperation; +use crate::execution::types::state_transition_execution_context::{StateTransitionExecutionContext, StateTransitionExecutionContextMethodsV0}; +use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; +use crate::platform_types::platform::PlatformStateRef; + +pub(in 
crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenClaimTransitionActionStateValidationV0 { + fn validate_state_v0( + &self, + platform: &PlatformStateRef, + owner_id: Identifier, + block_info: &BlockInfo, + execution_context: &mut StateTransitionExecutionContext, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result; +} +impl TokenClaimTransitionActionStateValidationV0 for TokenClaimTransitionAction { + fn validate_state_v0( + &self, + platform: &PlatformStateRef, + owner_id: Identifier, + block_info: &BlockInfo, + execution_context: &mut StateTransitionExecutionContext, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let validation_result = self.base().validate_state( + platform, + owner_id, + block_info, + execution_context, + transaction, + platform_version, + )?; + if !validation_result.is_valid() { + return Ok(validation_result); + } + + // Let's first check to see if we are authorized to perform this action + let contract = &self.data_contract_fetch_info_ref().contract; + let token_configuration = contract.expected_token_configuration(self.token_position())?; + + if let Some(max_supply) = token_configuration.max_supply() { + // We have a max supply, let's get the current supply + let (token_total_supply, fee) = platform.drive.fetch_token_total_supply_with_cost( + self.token_id().to_buffer(), + block_info, + transaction, + platform_version, + )?; + execution_context.add_operation(ValidationOperation::PrecalculatedOperation(fee)); + if let Some(token_total_supply) = token_total_supply { + if let Some(total_supply_after_release) = + token_total_supply.checked_add(self.amount()) + { + if total_supply_after_release > max_supply { + // We are trying to set a max supply smaller than the token total supply + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::StateError(StateError::TokenMintPastMaxSupplyError( + 
TokenMintPastMaxSupplyError::new( + self.token_id(), + self.amount(), + token_total_supply, + max_supply, + ), + )), + )); + } + } else { + // if we overflow we would also always go over max supply + return Ok(SimpleConsensusValidationResult::new_with_error( + ConsensusError::StateError(StateError::TokenMintPastMaxSupplyError( + TokenMintPastMaxSupplyError::new( + self.token_id(), + self.amount(), + token_total_supply, + max_supply, + ), + )), + )); + } + } else { + return Err(Error::Drive(drive::error::Error::Drive( + DriveError::CorruptedDriveState(format!( + "token {} total supply not found", + self.token_id() + )), + ))); + } + } + + Ok(SimpleConsensusValidationResult::new()) + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/mod.rs index 8dc97773668..73c2958bb1f 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/mod.rs @@ -8,18 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_config_update_transition_action::state_v0::TokenConfigUpdateTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_config_update_transition_action::structure_v0::TokenConfigUpdateTransitionActionStructureValidationV0; use 
crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenConfigUpdateTransitionActionValidation { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -32,26 +25,6 @@ pub trait TokenConfigUpdateTransitionActionValidation { } impl TokenConfigUpdateTransitionActionValidation for TokenConfigUpdateTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_config_update_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenConfigUpdateTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/structure_v0/mod.rs deleted file mode 100644 index 604c50480bc..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_config_update_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_config_update_transition_action::{TokenConfigUpdateTransitionAction, TokenConfigUpdateTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use 
crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenConfigUpdateTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenConfigUpdateTransitionActionStructureValidationV0 for TokenConfigUpdateTransitionAction { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/mod.rs index 513913539b8..1f073d1ccef 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/mod.rs @@ -8,18 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_destroy_frozen_funds_transition_action::state_v0::TokenDestroyFrozenFundsTransitionActionStateValidationV0; -use 
crate::execution::validation::state_transition::batch::action_validation::token::token_destroy_frozen_funds_transition_action::structure_v0::TokenDestroyFrozenFundsTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenDestroyFrozenFundsTransitionActionValidation { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -32,26 +25,6 @@ pub trait TokenDestroyFrozenFundsTransitionActionValidation { } impl TokenDestroyFrozenFundsTransitionActionValidation for TokenDestroyFrozenFundsTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_destroy_frozen_funds_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenDestroyFrozenFundsTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/structure_v0/mod.rs deleted file mode 100644 index 35537185729..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_destroy_frozen_funds_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use 
drive::state_transition_action::batch::batched_transition::token_transition::token_destroy_frozen_funds_transition_action::{TokenDestroyFrozenFundsTransitionAction, TokenDestroyFrozenFundsTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenDestroyFrozenFundsTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenDestroyFrozenFundsTransitionActionStructureValidationV0 - for TokenDestroyFrozenFundsTransitionAction -{ - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/mod.rs index 074b2f59f1f..870b7714a40 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/mod.rs @@ -8,18 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use 
crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_emergency_action_transition_action::state_v0::TokenEmergencyActionTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_emergency_action_transition_action::structure_v0::TokenEmergencyActionTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenEmergencyActionTransitionActionValidation { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -32,26 +25,6 @@ pub trait TokenEmergencyActionTransitionActionValidation { } impl TokenEmergencyActionTransitionActionValidation for TokenEmergencyActionTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_emergency_action_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenEmergencyActionTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/structure_v0/mod.rs deleted file mode 100644 index e89432925e6..00000000000 --- 
a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_emergency_action_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_emergency_action_transition_action::{TokenEmergencyActionTransitionAction, TokenEmergencyActionTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenEmergencyActionTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenEmergencyActionTransitionActionStructureValidationV0 - for TokenEmergencyActionTransitionAction -{ - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/mod.rs index a832de48109..ec68728f9a3 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/mod.rs +++ 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/mod.rs @@ -8,18 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_freeze_transition_action::state_v0::TokenFreezeTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_freeze_transition_action::structure_v0::TokenFreezeTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenFreezeTransitionActionValidation { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -32,26 +25,6 @@ pub trait TokenFreezeTransitionActionValidation { } impl TokenFreezeTransitionActionValidation for TokenFreezeTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_freeze_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenFreezeTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/structure_v0/mod.rs 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/structure_v0/mod.rs deleted file mode 100644 index d2c641554c3..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_freeze_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_freeze_transition_action::{TokenFreezeTransitionAction, TokenFreezeTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenFreezeTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenFreezeTransitionActionStructureValidationV0 for TokenFreezeTransitionAction { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/mod.rs index 56c8d556a74..eb22f0df862 100644 --- 
a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/mod.rs @@ -8,18 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_mint_transition_action::state_v0::TokenMintTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_mint_transition_action::structure_v0::TokenMintTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenMintTransitionActionValidation { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -32,26 +25,6 @@ pub trait TokenMintTransitionActionValidation { } impl TokenMintTransitionActionValidation for TokenMintTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_mint_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenMintTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, @@ -66,7 +39,7 @@ impl TokenMintTransitionActionValidation for TokenMintTransitionAction { .validation_and_processing .state_transitions .batch_state_transition - 
.token_issuance_transition_state_validation + .token_mint_transition_state_validation { 0 => self.validate_state_v0( platform, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/structure_v0/mod.rs deleted file mode 100644 index 7eb4aab96d5..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_mint_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_mint_transition_action::{TokenMintTransitionAction, TokenMintTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenMintTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenMintTransitionActionStructureValidationV0 for TokenMintTransitionAction { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/mod.rs 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/mod.rs index 924b4b80a24..2bcc691a489 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/mod.rs @@ -8,19 +8,11 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_transfer_transition_action::state_v0::TokenTransferTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_transfer_transition_action::structure_v0::TokenTransferTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; pub trait TokenTransferTransitionActionValidation { - fn validate_structure( - &self, - owner_id: Identifier, - platform_version: &PlatformVersion, - ) -> Result; - fn validate_state( &self, platform: &PlatformStateRef, @@ -33,27 +25,6 @@ pub trait TokenTransferTransitionActionValidation { } impl TokenTransferTransitionActionValidation for TokenTransferTransitionAction { - fn validate_structure( - &self, - owner_id: Identifier, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_transfer_transition_structure_validation - { - 0 => self.validate_structure_v0(owner_id, platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: 
"TokenTransferTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, @@ -68,7 +39,7 @@ impl TokenTransferTransitionActionValidation for TokenTransferTransitionAction { .validation_and_processing .state_transitions .batch_state_transition - .token_issuance_transition_state_validation + .token_transfer_transition_state_validation { 0 => self.validate_state_v0( platform, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/structure_v0/mod.rs deleted file mode 100644 index 2b0fc92e204..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_transfer_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,29 +0,0 @@ -use dpp::identifier::Identifier; -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_transfer_transition_action::TokenTransferTransitionAction; -use dpp::version::PlatformVersion; -use drive::state_transition_action::batch::batched_transition::token_transition::token_transfer_transition_action::v0::TokenTransferTransitionActionAccessorsV0; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenTransferTransitionActionStructureValidationV0 { - fn validate_structure_v0( - &self, - owner_id: Identifier, - platform_version: &PlatformVersion, - ) -> Result; -} -impl 
TokenTransferTransitionActionStructureValidationV0 for TokenTransferTransitionAction { - fn validate_structure_v0( - &self, - _owner_id: Identifier, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/mod.rs index 293906e3f0c..629be08661b 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/mod.rs @@ -8,18 +8,18 @@ use crate::error::Error; use crate::error::execution::ExecutionError; use crate::execution::types::state_transition_execution_context::StateTransitionExecutionContext; use crate::execution::validation::state_transition::batch::action_validation::token::token_unfreeze_transition_action::state_v0::TokenUnfreezeTransitionActionStateValidationV0; -use crate::execution::validation::state_transition::batch::action_validation::token::token_unfreeze_transition_action::structure_v0::TokenUnfreezeTransitionActionStructureValidationV0; use crate::platform_types::platform::PlatformStateRef; mod state_v0; -mod structure_v0; -pub trait TokenUnfreezeTransitionActionValidation { +pub trait TokenUnfreezeTransitionStructureValidation { fn validate_structure( &self, platform_version: &PlatformVersion, ) -> Result; +} +pub trait TokenUnfreezeTransitionActionValidation { fn validate_state( &self, 
platform: &PlatformStateRef, @@ -32,26 +32,6 @@ pub trait TokenUnfreezeTransitionActionValidation { } impl TokenUnfreezeTransitionActionValidation for TokenUnfreezeTransitionAction { - fn validate_structure( - &self, - platform_version: &PlatformVersion, - ) -> Result { - match platform_version - .drive_abci - .validation_and_processing - .state_transitions - .batch_state_transition - .token_unfreeze_transition_structure_validation - { - 0 => self.validate_structure_v0(platform_version), - version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { - method: "TokenUnfreezeTransitionAction::validate_structure".to_string(), - known_versions: vec![0], - received: version, - })), - } - } - fn validate_state( &self, platform: &PlatformStateRef, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/structure_v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/structure_v0/mod.rs deleted file mode 100644 index f3a0b24e337..00000000000 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/action_validation/token/token_unfreeze_transition_action/structure_v0/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use dpp::validation::{SimpleConsensusValidationResult}; -use drive::state_transition_action::batch::batched_transition::token_transition::token_unfreeze_transition_action::{TokenUnfreezeTransitionAction, TokenUnfreezeTransitionActionAccessorsV0}; -use dpp::version::PlatformVersion; -use crate::error::Error; -use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; - -pub(in crate::execution::validation::state_transition::state_transitions::batch::action_validation) trait TokenUnfreezeTransitionActionStructureValidationV0 { - fn 
validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result; -} -impl TokenUnfreezeTransitionActionStructureValidationV0 for TokenUnfreezeTransitionAction { - fn validate_structure_v0( - &self, - platform_version: &PlatformVersion, - ) -> Result { - let validation_result = self.base().validate_structure(platform_version)?; - if !validation_result.is_valid() { - return Ok(validation_result); - } - - Ok(SimpleConsensusValidationResult::default()) - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/advanced_structure/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/advanced_structure/v0/mod.rs index 1147fa871c4..0032ff57537 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/advanced_structure/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/advanced_structure/v0/mod.rs @@ -29,14 +29,6 @@ use drive::state_transition_action::batch::batched_transition::document_transiti use drive::state_transition_action::batch::batched_transition::document_transition::document_transfer_transition_action::DocumentTransferTransitionActionAccessorsV0; use drive::state_transition_action::batch::batched_transition::document_transition::document_update_price_transition_action::DocumentUpdatePriceTransitionActionAccessorsV0; use drive::state_transition_action::batch::batched_transition::document_transition::DocumentTransitionAction; -use drive::state_transition_action::batch::batched_transition::token_transition::token_config_update_transition_action::TokenConfigUpdateTransitionActionAccessorsV0; -use drive::state_transition_action::batch::batched_transition::token_transition::token_destroy_frozen_funds_transition_action::TokenDestroyFrozenFundsTransitionActionAccessorsV0; -use 
drive::state_transition_action::batch::batched_transition::token_transition::token_emergency_action_transition_action::TokenEmergencyActionTransitionActionAccessorsV0; -use drive::state_transition_action::batch::batched_transition::token_transition::token_freeze_transition_action::TokenFreezeTransitionActionAccessorsV0; -use drive::state_transition_action::batch::batched_transition::token_transition::token_mint_transition_action::TokenMintTransitionActionAccessorsV0; -use drive::state_transition_action::batch::batched_transition::token_transition::token_transfer_transition_action::v0::TokenTransferTransitionActionAccessorsV0; -use drive::state_transition_action::batch::batched_transition::token_transition::token_unfreeze_transition_action::TokenUnfreezeTransitionActionAccessorsV0; -use drive::state_transition_action::batch::batched_transition::token_transition::TokenTransitionAction; use drive::state_transition_action::StateTransitionAction; use drive::state_transition_action::system::bump_identity_data_contract_nonce_action::BumpIdentityDataContractNonceAction; use crate::error::execution::ExecutionError; @@ -45,14 +37,7 @@ use crate::execution::types::state_transition_execution_context::{StateTransitio use crate::execution::validation::state_transition::batch::action_validation::document::document_purchase_transition_action::DocumentPurchaseTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::document::document_transfer_transition_action::DocumentTransferTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::document::document_update_price_transition_action::DocumentUpdatePriceTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_burn_transition_action::TokenBurnTransitionActionValidation; -use 
crate::execution::validation::state_transition::batch::action_validation::token::token_config_update_transition_action::TokenConfigUpdateTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_destroy_frozen_funds_transition_action::TokenDestroyFrozenFundsTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_emergency_action_transition_action::TokenEmergencyActionTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_freeze_transition_action::TokenFreezeTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_mint_transition_action::TokenMintTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_transfer_transition_action::TokenTransferTransitionActionValidation; -use crate::execution::validation::state_transition::batch::action_validation::token::token_unfreeze_transition_action::TokenUnfreezeTransitionActionValidation; +use crate::execution::validation::state_transition::batch::action_validation::token::token_base_transition_action::TokenBaseTransitionActionValidation; pub(in crate::execution::validation::state_transition::state_transitions::batch) trait DocumentsBatchStateTransitionStructureValidationV0 { @@ -231,117 +216,22 @@ impl DocumentsBatchStateTransitionStructureValidationV0 for BatchTransition { } } }, - BatchedTransitionAction::TokenAction(token_action) => match token_action { - TokenTransitionAction::BurnAction(burn_action) => { - let result = burn_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(token_action.base(), self.owner_id(), 
self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - TokenTransitionAction::MintAction(mint_action) => { - let result = mint_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(mint_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } + BatchedTransitionAction::TokenAction(token_transition_action) => { + // token actions only need to do advanced structure validation on the base action + let result = token_transition_action + .base() + .validate_structure(platform_version)?; + if !result.is_valid() { + let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( + BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(token_transition_action.base(), self.owner_id(), self.user_fee_increase()), + ); + + return Ok(ConsensusValidationResult::new_with_data_and_errors( + bump_action, + result.errors, + )); } - TokenTransitionAction::TransferAction(transfer_action) => { - let result = transfer_action - .validate_structure(self.owner_id(), platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(transfer_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - TokenTransitionAction::FreezeAction(freeze_action) => { - let result = freeze_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = 
StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(freeze_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - TokenTransitionAction::UnfreezeAction(unfreeze_action) => { - let result = unfreeze_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(unfreeze_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - TokenTransitionAction::EmergencyActionAction(emergency_action_action) => { - let result = - emergency_action_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(emergency_action_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - TokenTransitionAction::DestroyFrozenFundsAction( - destroy_frozen_funds_action, - ) => { - let result = - destroy_frozen_funds_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(destroy_frozen_funds_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - TokenTransitionAction::ConfigUpdateAction(config_update_action) 
=> { - let result = config_update_action.validate_structure(platform_version)?; - if !result.is_valid() { - let bump_action = StateTransitionAction::BumpIdentityDataContractNonceAction( - BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition_action(config_update_action.base(), self.owner_id(), self.user_fee_increase()), - ); - - return Ok(ConsensusValidationResult::new_with_data_and_errors( - bump_action, - result.errors, - )); - } - } - }, + } BatchedTransitionAction::BumpIdentityDataContractNonce(_) => { return Err(Error::Execution(ExecutionError::CorruptedCodeExecution( "we should not have a bump identity contract nonce at this stage", diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/mod.rs index 7b38ca9b32f..af6a8cb1caf 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/mod.rs @@ -7,6 +7,9 @@ mod is_allowed; mod state; mod transformer; +#[cfg(test)] +mod tests; + use dpp::block::block_info::BlockInfo; use dpp::dashcore::Network; use dpp::identity::PartialIdentity; @@ -236,16100 +239,3 @@ impl StateTransitionStateValidationV0 for BatchTransition { } } } - -#[cfg(test)] -mod tests { - use crate::execution::validation::state_transition::state_transitions::tests::setup_identity; - use crate::platform_types::platform_state::v0::PlatformStateV0Methods; - use crate::platform_types::state_transitions_processing_result::StateTransitionExecutionResult; - use crate::test::helpers::setup::TestPlatformBuilder; - use assert_matches::assert_matches; - use dpp::block::block_info::BlockInfo; - use dpp::consensus::basic::BasicError; - use dpp::consensus::state::state_error::StateError; - use dpp::consensus::ConsensusError; - use dpp::dash_to_credits; - use 
dpp::data_contract::accessors::v0::DataContractV0Getters; - use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; - use dpp::data_contract::document_type::random_document::{ - CreateRandomDocument, DocumentFieldFillSize, DocumentFieldFillType, - }; - use dpp::data_contract::group::v0::GroupV0; - use dpp::data_contract::group::Group; - use dpp::document::document_methods::DocumentMethodsV0; - use dpp::document::transfer::Transferable; - use dpp::document::{DocumentV0Getters, DocumentV0Setters}; - use dpp::fee::fee_result::BalanceChange; - use dpp::fee::Credits; - use dpp::group::GroupStateTransitionInfo; - use dpp::identifier::Identifier; - use dpp::identity::accessors::IdentityGettersV0; - use dpp::nft::TradeMode; - use dpp::platform_value::btreemap_extensions::BTreeValueMapHelper; - use dpp::platform_value::{Bytes32, Value}; - use dpp::serialization::PlatformSerializable; - use dpp::state_transition::batch_transition::methods::v0::DocumentsBatchTransitionMethodsV0; - use dpp::state_transition::batch_transition::BatchTransition; - use dpp::tests::json_document::json_document_to_contract; - use drive::drive::document::query::QueryDocumentsOutcomeV0Methods; - use drive::drive::document::query::QueryDocumentsWithFlagsOutcomeV0Methods; - use drive::query::DriveDocumentQuery; - use drive::util::storage_flags::StorageFlags; - use platform_version::version::PlatformVersion; - use rand::prelude::StdRng; - use rand::Rng; - use rand::SeedableRng; - - mod creation_tests { - use dapi_grpc::platform::v0::{get_contested_resource_vote_state_request, get_contested_resource_vote_state_response, GetContestedResourceVoteStateRequest, GetContestedResourceVoteStateResponse}; - use dapi_grpc::platform::v0::get_contested_resource_vote_state_request::get_contested_resource_vote_state_request_v0::ResultType; - use dapi_grpc::platform::v0::get_contested_resource_vote_state_request::GetContestedResourceVoteStateRequestV0; - use 
dapi_grpc::platform::v0::get_contested_resource_vote_state_response::{get_contested_resource_vote_state_response_v0, GetContestedResourceVoteStateResponseV0}; - use super::*; - use assert_matches::assert_matches; - use rand::distributions::Standard; - use dpp::consensus::basic::document::DocumentFieldMaxSizeExceededError; - use dpp::consensus::ConsensusError; - use dpp::consensus::basic::BasicError; - use dpp::fee::fee_result::refunds::FeeRefunds; - use dpp::fee::fee_result::FeeResult; - use dpp::data_contract::accessors::v0::DataContractV0Setters; - use dpp::data_contract::document_type::restricted_creation::CreationRestrictionMode; - use dpp::document::Document; - use dpp::document::serialization_traits::DocumentPlatformConversionMethodsV0; - use dpp::util::hash::hash_double; - use dpp::voting::vote_choices::resource_vote_choice::ResourceVoteChoice; - use dpp::voting::vote_choices::resource_vote_choice::ResourceVoteChoice::TowardsIdentity; - use drive::util::object_size_info::DataContractResolvedInfo; - use drive::drive::votes::resolved::vote_polls::contested_document_resource_vote_poll::ContestedDocumentResourceVotePollWithContractInfoAllowBorrowed; - use drive::query::vote_poll_vote_state_query::ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally; - use drive::query::vote_poll_vote_state_query::ResolvedContestedDocumentVotePollDriveQuery; - use drive::util::test_helpers::setup_contract; - use crate::execution::validation::state_transition::state_transitions::tests::{add_contender_to_dpns_name_contest, create_dpns_identity_name_contest, create_dpns_name_contest_give_key_info, perform_votes_multi}; - use crate::platform_types::platform_state::v0::PlatformStateV0Methods; - use crate::platform_types::state_transitions_processing_result::StateTransitionExecutionResult::PaidConsensusError; - use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; - use dpp::consensus::state::state_error::StateError; - use dpp::dashcore::Network; - use 
dpp::dashcore::Network::Testnet; - use dpp::data_contract::DataContract; - use dpp::identity::SecurityLevel; - use dpp::state_transition::batch_transition::document_base_transition::DocumentBaseTransition; - use dpp::state_transition::batch_transition::document_create_transition::DocumentCreateTransitionV0; - use dpp::state_transition::batch_transition::{DocumentCreateTransition, BatchTransitionV0}; - use dpp::state_transition::StateTransition; - use crate::config::PlatformConfig; - - #[test] - fn test_document_creation() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents 
batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - - #[test] - fn test_document_creation_should_fail_if_reusing_entropy() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - 
None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // Now let's create a second document with the same entropy - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/coy.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - 
&transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::DocumentAlreadyPresentError { .. }), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - - #[test] - fn test_document_creation_with_very_big_field() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay_contract_no_max_length = setup_contract( - &platform.drive, - "tests/supporting_files/contract/dashpay/dashpay-contract-no-max-length.json", - None, - None, - None::, - None, - None, - ); - - let dashpay_contract = dashpay_contract_no_max_length.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let max_field_size = platform_version.system_limits.max_field_value_size; - let avatar_size = max_field_size + 1000; - - document.set( - "avatar", - Value::Bytes( - rng.sample_iter(Standard) - .take(avatar_size as usize) - .collect(), - ), - ); - - let documents_batch_create_transition = - 
BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - assert_eq!( - processing_result.execution_results().first().unwrap(), - &PaidConsensusError( - ConsensusError::BasicError(BasicError::DocumentFieldMaxSizeExceededError( - DocumentFieldMaxSizeExceededError::new( - "avatar".to_string(), - avatar_size as u64, - max_field_size as u64 - ) - )), - FeeResult { - storage_fee: 11556000, - processing_fee: 526140, - fee_refunds: FeeRefunds::default(), - removed_bytes_from_system: 0 - } - ) - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - - #[test] - fn test_document_creation_on_contested_unique_index() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity_1, signer_1, key_1) = - setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, 93, dash_to_credits!(0.5)); - - let dpns = platform.drive.cache.system_data_contracts.load_dpns(); - let dpns_contract = dpns.clone(); 
- - let preorder = dpns_contract - .document_type_for_name("preorder") - .expect("expected a profile document type"); - - assert!(!preorder.documents_mutable()); - assert!(preorder.documents_can_be_deleted()); - assert!(!preorder.documents_transferable().is_transferable()); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - assert!(!domain.documents_mutable()); - // Deletion is disabled with data trigger - assert!(domain.documents_can_be_deleted()); - assert!(domain.documents_transferable().is_transferable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut preorder_document_2 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_2 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document_1.set("parentDomainName", "dash".into()); - document_1.set("normalizedParentDomainName", "dash".into()); - document_1.set("label", "quantum".into()); - document_1.set("normalizedLabel", "quantum".into()); - 
document_1.set("records.identity", document_1.owner_id().into()); - document_1.set("subdomainRules.allowSubdomains", false.into()); - - document_2.set("parentDomainName", "dash".into()); - document_2.set("normalizedParentDomainName", "dash".into()); - document_2.set("label", "quantum".into()); - document_2.set("normalizedLabel", "quantum".into()); - document_2.set("records.identity", document_2.owner_id().into()); - document_2.set("subdomainRules.allowSubdomains", false.into()); - - let salt_1: [u8; 32] = rng.gen(); - let salt_2: [u8; 32] = rng.gen(); - - let mut salted_domain_buffer_1: Vec = vec![]; - salted_domain_buffer_1.extend(salt_1); - salted_domain_buffer_1.extend("quantum.dash".as_bytes()); - - let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); - - let mut salted_domain_buffer_2: Vec = vec![]; - salted_domain_buffer_2.extend(salt_2); - salted_domain_buffer_2.extend("quantum.dash".as_bytes()); - - let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); - - preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); - preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); - - document_1.set("preorderSalt", salt_1.into()); - document_2.set("preorderSalt", salt_2.into()); - - let documents_batch_create_preorder_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_1, - preorder, - entropy.0, - &key_1, - 2, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_1 = - documents_batch_create_preorder_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_2, - preorder, - entropy.0, - &key_2, - 2, - 0, - &signer_2, - platform_version, - None, - 
None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_2 = - documents_batch_create_preorder_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - document_1, - domain, - entropy.0, - &key_1, - 3, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_1 = - documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - document_2, - domain, - entropy.0, - &key_2, - 3, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_2 = - documents_batch_create_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_preorder_transition_1.clone(), - documents_batch_create_serialized_preorder_transition_2.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 2); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( 
- &vec![ - documents_batch_create_serialized_transition_1.clone(), - documents_batch_create_serialized_transition_2.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 2); - - // Now let's run a query for the vote totals - - let config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); - - let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) - .expect("expected to encode the word dash"); - - let quantum_encoded = - bincode::encode_to_vec(Value::Text("quantum".to_string()), config) - .expect("expected to encode the word quantum"); - - let index_name = "parentNameAndLabel".to_string(); - - let query_validation_result = platform - .query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: domain.name().clone(), - index_name: index_name.clone(), - index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: false, - start_at_identifier_info: None, - count: None, - prove: false, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = query_validation_result.version.expect("expected a version"); - - let Some( - get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( - 
get_contested_resource_vote_state_response_v0::ContestedResourceContenders { - contenders, - abstain_vote_tally, - lock_vote_tally, - finished_vote_info, - }, - ), - ) = result - else { - panic!("expected contenders") - }; - - assert_eq!(abstain_vote_tally, None); - - assert_eq!(lock_vote_tally, None); - - assert_eq!(finished_vote_info, None); - - assert_eq!(contenders.len(), 2); - - let first_contender = contenders.first().unwrap(); - - let second_contender = contenders.last().unwrap(); - - let first_contender_document = Document::from_bytes( - first_contender - .document - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - let second_contender_document = Document::from_bytes( - second_contender - .document - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - assert_ne!(first_contender_document, second_contender_document); - - assert_eq!(first_contender.identifier, identity_1.id().to_vec()); - - assert_eq!(second_contender.identifier, identity_2.id().to_vec()); - - assert_eq!(first_contender.vote_count, Some(0)); - - assert_eq!(second_contender.vote_count, Some(0)); - - let GetContestedResourceVoteStateResponse { version } = platform - .query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: domain.name().clone(), - index_name: "parentNameAndLabel".to_string(), - index_values: vec![dash_encoded, quantum_encoded], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: true, - start_at_identifier_info: None, - count: None, - prove: true, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - 
.expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = version.expect("expected a version"); - - let Some(get_contested_resource_vote_state_response_v0::Result::Proof(proof)) = result - else { - panic!("expected contenders") - }; - - let resolved_contested_document_vote_poll_drive_query = - ResolvedContestedDocumentVotePollDriveQuery { - vote_poll: ContestedDocumentResourceVotePollWithContractInfoAllowBorrowed { - contract: DataContractResolvedInfo::BorrowedDataContract(&dpns_contract), - document_type_name: domain.name().clone(), - index_name: index_name.clone(), - index_values: vec![ - Value::Text("dash".to_string()), - Value::Text("quantum".to_string()), - ], - }, - result_type: DocumentsAndVoteTally, - offset: None, - limit: None, - start_at: None, - allow_include_locked_and_abstaining_vote_tally: true, - }; - - let (_root_hash, result) = resolved_contested_document_vote_poll_drive_query - .verify_vote_poll_vote_state_proof(proof.grovedb_proof.as_ref(), platform_version) - .expect("expected to verify proof"); - - let contenders = result.contenders; - assert_eq!(contenders.len(), 2); - - let first_contender = contenders.first().unwrap(); - - let second_contender = contenders.last().unwrap(); - - let first_contender_document = Document::from_bytes( - first_contender - .serialized_document() - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - let second_contender_document = Document::from_bytes( - second_contender - .serialized_document() - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - assert_ne!(first_contender_document, second_contender_document); - - assert_eq!(first_contender.identity_id(), identity_1.id()); - - assert_eq!(second_contender.identity_id(), 
identity_2.id()); - - assert_eq!(first_contender.vote_tally(), Some(0)); - - assert_eq!(second_contender.vote_tally(), Some(0)); - } - - #[test] - fn test_document_creation_on_contested_unique_index_should_fail_if_not_paying_for_it() { - let platform_version = PlatformVersion::latest(); - let platform_config = PlatformConfig { - network: Network::Dash, - ..Default::default() - }; - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .with_config(platform_config) - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity_1, signer_1, key_1) = - setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let dpns = platform.drive.cache.system_data_contracts.load_dpns(); - let dpns_contract = dpns.clone(); - - let preorder = dpns_contract - .document_type_for_name("preorder") - .expect("expected a profile document type"); - - assert!(!preorder.documents_mutable()); - assert!(preorder.documents_can_be_deleted()); - assert!(!preorder.documents_transferable().is_transferable()); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - assert!(!domain.documents_mutable()); - // Deletion is disabled with data trigger - assert!(domain.documents_can_be_deleted()); - assert!(domain.documents_transferable().is_transferable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - 
platform_version, - ) - .expect("expected a random document"); - - document_1.set("parentDomainName", "dash".into()); - document_1.set("normalizedParentDomainName", "dash".into()); - document_1.set("label", "quantum".into()); - document_1.set("normalizedLabel", "quantum".into()); - document_1.set("records.identity", document_1.owner_id().into()); - document_1.set("subdomainRules.allowSubdomains", false.into()); - - let salt_1: [u8; 32] = rng.gen(); - - let mut salted_domain_buffer_1: Vec = vec![]; - salted_domain_buffer_1.extend(salt_1); - salted_domain_buffer_1.extend("quantum.dash".as_bytes()); - - let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); - - preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); - - document_1.set("preorderSalt", salt_1.into()); - - let documents_batch_create_preorder_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_1, - preorder, - entropy.0, - &key_1, - 2, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_1 = - documents_batch_create_preorder_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let owner_id = document_1.owner_id(); - let create_transition: DocumentCreateTransition = DocumentCreateTransitionV0 { - base: DocumentBaseTransition::from_document( - &document_1, - domain, - 3, - platform_version, - None, - ) - .expect("expected a base transition"), - entropy: entropy.0, - data: document_1.clone().properties_consumed(), - // Sending 0 balance that should not be valid - prefunded_voting_balance: None, - } - .into(); - let documents_batch_inner_create_transition_1: BatchTransition = BatchTransitionV0 { - owner_id, - transitions: vec![create_transition.into()], - user_fee_increase: 0, - signature_public_key_id: 0, - signature: Default::default(), - } - 
.into(); - let mut documents_batch_create_transition_1: StateTransition = - documents_batch_inner_create_transition_1.into(); - documents_batch_create_transition_1 - .sign_external(&key_1, &signer_1, Some(|_, _| Ok(SecurityLevel::HIGH))) - .expect("expected to sign"); - - let documents_batch_create_serialized_transition_1 = - documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_preorder_transition_1.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 1); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition_1.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [PaidConsensusError( - ConsensusError::StateError(StateError::DocumentContestNotPaidForError(_)), - _ - )] - ); - - // Now let's run a query for the vote totals - - let config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); - - let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) - .expect("expected to encode the word dash"); - - let quantum_encoded = - 
bincode::encode_to_vec(Value::Text("quantum".to_string()), config) - .expect("expected to encode the word quantum"); - - let index_name = "parentNameAndLabel".to_string(); - - let query_validation_result = platform - .query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: domain.name().clone(), - index_name: index_name.clone(), - index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: false, - start_at_identifier_info: None, - count: None, - prove: false, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = query_validation_result.version.expect("expected a version"); - - let Some( - get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( - get_contested_resource_vote_state_response_v0::ContestedResourceContenders { - contenders, - abstain_vote_tally, - lock_vote_tally, - finished_vote_info, - }, - ), - ) = result - else { - panic!("expected contenders") - }; - - assert_eq!(abstain_vote_tally, None); - - assert_eq!(lock_vote_tally, None); - - assert_eq!(finished_vote_info, None); - - assert_eq!(contenders.len(), 0); - - let drive_query = DriveDocumentQuery::new_primary_key_single_item_query( - &dpns, - domain, - document_1.id(), - ); - - let documents = platform - .drive - .query_documents(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - assert!(documents.first().is_none()); - } - - #[test] - fn 
test_document_creation_on_contested_unique_index_should_not_fail_if_not_paying_for_it_on_testnet_before_epoch_2080( - ) { - let platform_version = PlatformVersion::latest(); - let platform_config = PlatformConfig { - network: Testnet, - ..Default::default() - }; - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .with_config(platform_config) - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity_1, signer_1, key_1) = - setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let dpns = platform.drive.cache.system_data_contracts.load_dpns(); - let dpns_contract = dpns.clone(); - - let preorder = dpns_contract - .document_type_for_name("preorder") - .expect("expected a profile document type"); - - assert!(!preorder.documents_mutable()); - assert!(preorder.documents_can_be_deleted()); - assert!(!preorder.documents_transferable().is_transferable()); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - assert!(!domain.documents_mutable()); - // Deletion is disabled with data trigger - assert!(domain.documents_can_be_deleted()); - assert!(domain.documents_transferable().is_transferable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document_1.set("parentDomainName", "dash".into()); - 
document_1.set("normalizedParentDomainName", "dash".into()); - document_1.set("label", "quantum".into()); - document_1.set("normalizedLabel", "quantum".into()); - document_1.set("records.identity", document_1.owner_id().into()); - document_1.set("subdomainRules.allowSubdomains", false.into()); - - let salt_1: [u8; 32] = rng.gen(); - - let mut salted_domain_buffer_1: Vec = vec![]; - salted_domain_buffer_1.extend(salt_1); - salted_domain_buffer_1.extend("quantum.dash".as_bytes()); - - let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); - - preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); - - document_1.set("preorderSalt", salt_1.into()); - - let documents_batch_create_preorder_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_1, - preorder, - entropy.0, - &key_1, - 2, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_1 = - documents_batch_create_preorder_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let owner_id = document_1.owner_id(); - let create_transition: DocumentCreateTransition = DocumentCreateTransitionV0 { - base: DocumentBaseTransition::from_document( - &document_1, - domain, - 3, - platform_version, - None, - ) - .expect("expected a base transition"), - entropy: entropy.0, - data: document_1.clone().properties_consumed(), - prefunded_voting_balance: None, - } - .into(); - let documents_batch_inner_create_transition_1: BatchTransition = BatchTransitionV0 { - owner_id, - transitions: vec![create_transition.into()], - user_fee_increase: 0, - signature_public_key_id: 0, - signature: Default::default(), - } - .into(); - let mut documents_batch_create_transition_1: StateTransition = - documents_batch_inner_create_transition_1.into(); - documents_batch_create_transition_1 - 
.sign_external(&key_1, &signer_1, Some(|_, _| Ok(SecurityLevel::HIGH))) - .expect("expected to sign"); - - let documents_batch_create_serialized_transition_1 = - documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_preorder_transition_1.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 1); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition_1.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(..)] - ); - - // Now let's run a query for the vote totals - - let config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); - - let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) - .expect("expected to encode the word dash"); - - let quantum_encoded = - bincode::encode_to_vec(Value::Text("quantum".to_string()), config) - .expect("expected to encode the word quantum"); - - let index_name = "parentNameAndLabel".to_string(); - - let query_validation_result = platform - 
.query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: domain.name().clone(), - index_name: index_name.clone(), - index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: false, - start_at_identifier_info: None, - count: None, - prove: false, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = query_validation_result.version.expect("expected a version"); - - let Some( - get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( - get_contested_resource_vote_state_response_v0::ContestedResourceContenders { - contenders, - abstain_vote_tally, - lock_vote_tally, - finished_vote_info, - }, - ), - ) = result - else { - panic!("expected contenders") - }; - - assert_eq!(abstain_vote_tally, None); - - assert_eq!(lock_vote_tally, None); - - assert_eq!(finished_vote_info, None); - - assert_eq!(contenders.len(), 0); // no contenders should have been created the document should just exist - - let drive_query = DriveDocumentQuery::new_primary_key_single_item_query( - &dpns, - domain, - document_1.id(), - ); - - let documents = platform - .drive - .query_documents(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - assert!(documents.first().is_some()); - } - - #[test] - fn test_document_creation_on_contested_unique_index_should_fail_if_reusing_entropy() { - let platform_version = PlatformVersion::latest(); - let mut platform = 
TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity_1, signer_1, key_1) = - setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, 93, dash_to_credits!(0.5)); - - let dpns = platform.drive.cache.system_data_contracts.load_dpns(); - let dpns_contract = dpns.clone(); - - let preorder = dpns_contract - .document_type_for_name("preorder") - .expect("expected a profile document type"); - - assert!(!preorder.documents_mutable()); - assert!(preorder.documents_can_be_deleted()); - assert!(!preorder.documents_transferable().is_transferable()); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - assert!(!domain.documents_mutable()); - // Deletion is disabled with data trigger - assert!(domain.documents_can_be_deleted()); - assert!(domain.documents_transferable().is_transferable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut preorder_document_2 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let new_entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_3_on_identity_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - new_entropy, //change entropy here - DocumentFieldFillType::FillIfNotRequired, 
- DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_2 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_3_on_identity_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, //same entropy - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document_1.set("parentDomainName", "dash".into()); - document_1.set("normalizedParentDomainName", "dash".into()); - document_1.set("label", "quantum".into()); - document_1.set("normalizedLabel", "quantum".into()); - document_1.set("records.identity", document_1.owner_id().into()); - document_1.set("subdomainRules.allowSubdomains", false.into()); - - document_2.set("parentDomainName", "dash".into()); - document_2.set("normalizedParentDomainName", "dash".into()); - document_2.set("label", "quantum".into()); - document_2.set("normalizedLabel", "quantum".into()); - document_2.set("records.identity", document_2.owner_id().into()); - document_2.set("subdomainRules.allowSubdomains", false.into()); - - document_3_on_identity_1.set("parentDomainName", "dash".into()); - document_3_on_identity_1.set("normalizedParentDomainName", "dash".into()); - document_3_on_identity_1.set("label", "cry".into()); - document_3_on_identity_1.set("normalizedLabel", "cry".into()); - document_3_on_identity_1.set( - "records.identity", - 
document_3_on_identity_1.owner_id().into(), - ); - document_3_on_identity_1.set("subdomainRules.allowSubdomains", false.into()); - - let salt_1: [u8; 32] = rng.gen(); - let salt_2: [u8; 32] = rng.gen(); - let salt_3: [u8; 32] = rng.gen(); - - let mut salted_domain_buffer_1: Vec = vec![]; - salted_domain_buffer_1.extend(salt_1); - salted_domain_buffer_1.extend("quantum.dash".as_bytes()); - - let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); - - let mut salted_domain_buffer_2: Vec = vec![]; - salted_domain_buffer_2.extend(salt_2); - salted_domain_buffer_2.extend("quantum.dash".as_bytes()); - - let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); - - let mut salted_domain_buffer_3: Vec = vec![]; - salted_domain_buffer_3.extend(salt_3); - salted_domain_buffer_3.extend("cry.dash".as_bytes()); - - let salted_domain_hash_3 = hash_double(salted_domain_buffer_3); - - preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); - preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); - preorder_document_3_on_identity_1.set("saltedDomainHash", salted_domain_hash_3.into()); - - document_1.set("preorderSalt", salt_1.into()); - document_2.set("preorderSalt", salt_2.into()); - document_3_on_identity_1.set("preorderSalt", salt_3.into()); - - let documents_batch_create_preorder_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_1, - preorder, - entropy.0, - &key_1, - 2, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_1 = - documents_batch_create_preorder_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_2, - preorder, - entropy.0, - &key_2, - 2, - 
0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_2 = - documents_batch_create_preorder_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_3 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_3_on_identity_1, - preorder, - new_entropy.0, - &key_1, - 3, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_3 = - documents_batch_create_preorder_transition_3 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - document_1, - domain, - entropy.0, - &key_1, - 4, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_1 = - documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - document_2, - domain, - entropy.0, - &key_2, - 3, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_2 = - documents_batch_create_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_3 = - BatchTransition::new_document_creation_transition_from_document( - document_3_on_identity_1, - domain, - entropy.0, - &key_1, - 5, - 0, - 
&signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_3 = - documents_batch_create_transition_3 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_preorder_transition_1.clone(), - documents_batch_create_serialized_preorder_transition_2.clone(), - documents_batch_create_serialized_preorder_transition_3.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 3); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_transition_1.clone(), - documents_batch_create_serialized_transition_2.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 2); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition_3.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - 
.commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::DocumentContestDocumentWithSameIdAlreadyPresentError { .. } - ), - _ - )] - ); - - // Now let's run a query for the vote totals - - let config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); - - let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) - .expect("expected to encode the word dash"); - - let quantum_encoded = - bincode::encode_to_vec(Value::Text("quantum".to_string()), config) - .expect("expected to encode the word quantum"); - - let index_name = "parentNameAndLabel".to_string(); - - let query_validation_result = platform - .query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: domain.name().clone(), - index_name: index_name.clone(), - index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: false, - start_at_identifier_info: None, - count: None, - prove: false, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = query_validation_result.version.expect("expected a version"); - - let Some( - get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( - get_contested_resource_vote_state_response_v0::ContestedResourceContenders { - contenders, - abstain_vote_tally, - 
lock_vote_tally, - finished_vote_info, - }, - ), - ) = result - else { - panic!("expected contenders") - }; - - assert_eq!(abstain_vote_tally, None); - - assert_eq!(lock_vote_tally, None); - - assert_eq!(finished_vote_info, None); - - assert_eq!(contenders.len(), 2); - - let first_contender = contenders.first().unwrap(); - - let second_contender = contenders.last().unwrap(); - - let first_contender_document = Document::from_bytes( - first_contender - .document - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - let second_contender_document = Document::from_bytes( - second_contender - .document - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - assert_ne!(first_contender_document, second_contender_document); - - assert_eq!(first_contender.identifier, identity_1.id().to_vec()); - - assert_eq!(second_contender.identifier, identity_2.id().to_vec()); - - assert_eq!(first_contender.vote_count, Some(0)); - - assert_eq!(second_contender.vote_count, Some(0)); - - let GetContestedResourceVoteStateResponse { version } = platform - .query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: domain.name().clone(), - index_name: "parentNameAndLabel".to_string(), - index_values: vec![dash_encoded, quantum_encoded], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: true, - start_at_identifier_info: None, - count: None, - prove: true, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - 
GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = version.expect("expected a version"); - - let Some(get_contested_resource_vote_state_response_v0::Result::Proof(proof)) = result - else { - panic!("expected contenders") - }; - - let resolved_contested_document_vote_poll_drive_query = - ResolvedContestedDocumentVotePollDriveQuery { - vote_poll: ContestedDocumentResourceVotePollWithContractInfoAllowBorrowed { - contract: DataContractResolvedInfo::BorrowedDataContract(&dpns_contract), - document_type_name: domain.name().clone(), - index_name: index_name.clone(), - index_values: vec![ - Value::Text("dash".to_string()), - Value::Text("quantum".to_string()), - ], - }, - result_type: DocumentsAndVoteTally, - offset: None, - limit: None, - start_at: None, - allow_include_locked_and_abstaining_vote_tally: true, - }; - - let (_root_hash, result) = resolved_contested_document_vote_poll_drive_query - .verify_vote_poll_vote_state_proof(proof.grovedb_proof.as_ref(), platform_version) - .expect("expected to verify proof"); - - let contenders = result.contenders; - assert_eq!(contenders.len(), 2); - - let first_contender = contenders.first().unwrap(); - - let second_contender = contenders.last().unwrap(); - - let first_contender_document = Document::from_bytes( - first_contender - .serialized_document() - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - let second_contender_document = Document::from_bytes( - second_contender - .serialized_document() - .as_ref() - .expect("expected a document") - .as_slice(), - domain, - platform_version, - ) - .expect("expected to get document"); - - assert_ne!(first_contender_document, second_contender_document); - - assert_eq!(first_contender.identity_id(), identity_1.id()); - - assert_eq!(second_contender.identity_id(), identity_2.id()); - - assert_eq!(first_contender.vote_tally(), Some(0)); - - 
assert_eq!(second_contender.vote_tally(), Some(0)); - } - - #[test] - fn test_that_a_contested_document_can_not_be_added_to_after_a_week() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let platform_state = platform.state.load(); - - let (contender_1, contender_2, dpns_contract) = create_dpns_identity_name_contest( - &mut platform, - &platform_state, - 7, - "quantum", - platform_version, - ); - - perform_votes_multi( - &mut platform, - dpns_contract.as_ref(), - vec![ - (TowardsIdentity(contender_1.id()), 50), - (TowardsIdentity(contender_2.id()), 5), - (ResourceVoteChoice::Abstain, 10), - (ResourceVoteChoice::Lock, 3), - ], - "quantum", - 10, - None, - platform_version, - ); - - let max_join_time = platform_version - .dpp - .validation - .voting - .allow_other_contenders_time_testing_ms; - - fast_forward_to_block(&platform, max_join_time / 2, 900, 42, 0, false); - - let platform_state = platform.state.load(); - - let _contender_3 = add_contender_to_dpns_name_contest( - &mut platform, - &platform_state, - 4, - "quantum", - None, // this should succeed, as we are under a week - platform_version, - ); - - let time_now = platform_version - .dpp - .validation - .voting - .allow_other_contenders_time_testing_ms - + 100; - - fast_forward_to_block(&platform, time_now, 900, 42, 0, false); //more than a week, less than 2 weeks - - let platform_state = platform.state.load(); - - // We expect this to fail - - let time_started = 0; - - let extra_time_used = 3000; // add_contender_to_dpns_name_contest uses this extra time - - let expected_error_message = format!( - "Document Contest for vote_poll ContestedDocumentResourceVotePoll {{ contract_id: GWRSAVFMjXx8HpQFaNJMqBV7MBgMK4br5UESsB4S31Ec, document_type_name: domain, index_name: parentNameAndLabel, index_values: [string dash, string quantum] }} is not joinable 
V0(ContestedDocumentVotePollStoredInfoV0 {{ finalized_events: [], vote_poll_status: Started(BlockInfo {{ time_ms: {}, height: 0, core_height: 0, epoch: 0 }}), locked_count: 0 }}), it started {} and it is now {}, and you can only join for {}", - time_started + extra_time_used, - time_started + extra_time_used, - time_now + extra_time_used, - max_join_time - ); - - let _contender_4 = add_contender_to_dpns_name_contest( - &mut platform, - &platform_state, - 9, - "quantum", - Some(expected_error_message.as_str()), // this should fail, as we are over a week - platform_version, - ); - } - - #[test] - fn test_that_a_contest_can_not_be_joined_twice_by_the_same_identity() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let platform_state = platform.state.load(); - - let ( - ( - _contender_1, - contender_1_signer, - contender_1_key, - _preorder_document_1, - (mut document_1, _entropy), - ), - (_contender_2, _, _, _, _), - dpns_contract, - ) = create_dpns_name_contest_give_key_info( - &mut platform, - &platform_state, - 7, - "quantum", - platform_version, - ); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - let mut rng = StdRng::seed_from_u64(89); - - let different_entropy = Bytes32::random_with_rng(&mut rng); - - document_1.set_id(Document::generate_document_id_v0( - dpns_contract.id_ref(), - document_1.owner_id_ref(), - domain.name(), - different_entropy.as_slice(), - )); - - let documents_batch_create_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - document_1, - domain, - different_entropy.0, - &contender_1_key, - 4, - 0, - &contender_1_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_1 = - 
documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition_1.clone()], - &platform_state, - &BlockInfo::default_with_time( - &platform_state - .last_committed_block_time_ms() - .unwrap_or_default() - + 3000, - ), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let result = processing_result.into_execution_results().remove(0); - - let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result - else { - panic!("expected a paid consensus error"); - }; - assert_eq!(consensus_error.to_string(), "An Identity with the id BjNejy4r9QAvLHpQ9Yq6yRMgNymeGZ46d48fJxJbMrfW is already a contestant for the vote_poll ContestedDocumentResourceVotePoll { contract_id: GWRSAVFMjXx8HpQFaNJMqBV7MBgMK4br5UESsB4S31Ec, document_type_name: domain, index_name: parentNameAndLabel, index_values: [string dash, string quantum] }"); - } - - #[test] - fn test_that_a_contested_document_can_not_be_added_if_we_are_locked() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let platform_state = platform.state.load(); - - let (contender_1, contender_2, dpns_contract) = create_dpns_identity_name_contest( - &mut platform, - &platform_state, - 7, - "quantum", - platform_version, - ); - - perform_votes_multi( - &mut platform, - dpns_contract.as_ref(), - vec![ - (TowardsIdentity(contender_1.id()), 3), - (TowardsIdentity(contender_2.id()), 5), - (ResourceVoteChoice::Abstain, 8), - (ResourceVoteChoice::Lock, 
10), - ], - "quantum", - 10, - None, - platform_version, - ); - - fast_forward_to_block( - &platform, - platform_version - .dpp - .validation - .voting - .allow_other_contenders_time_testing_ms - / 2, - 900, - 42, - 0, - false, - ); // a time when others can join - - let platform_state = platform.state.load(); - - let _contender_3 = add_contender_to_dpns_name_contest( - &mut platform, - &platform_state, - 4, - "quantum", - None, // this should succeed, as we are under the `platform_version.dpp.validation.voting.allow_other_contenders_time_testing_ms` - platform_version, - ); - - let time_after_distribution_limit = platform_version - .dpp - .voting_versions - .default_vote_poll_time_duration_test_network_ms - + 10_000; // add 10s (3 seconds is used by create_dpns_identity_name_contest) - - fast_forward_to_block(&platform, time_after_distribution_limit, 900, 42, 0, false); // after distribution - - let platform_state = platform.state.load(); - - let transaction = platform.drive.grove.start_transaction(); - - platform - .check_for_ended_vote_polls( - &platform_state, - &platform_state, - &BlockInfo { - time_ms: time_after_distribution_limit, - height: 900, - core_height: 42, - epoch: Default::default(), - }, - Some(&transaction), - platform_version, - ) - .expect("expected to check for ended vote polls"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let platform_state = platform.state.load(); - - // We expect this to fail - - let expected_error_message = format!( - "Document Contest for vote_poll ContestedDocumentResourceVotePoll {{ contract_id: GWRSAVFMjXx8HpQFaNJMqBV7MBgMK4br5UESsB4S31Ec, document_type_name: domain, index_name: parentNameAndLabel, index_values: [string dash, string quantum] }} is currently already locked V0(ContestedDocumentVotePollStoredInfoV0 {{ finalized_events: [ContestedDocumentVotePollStoredInfoVoteEventV0 {{ resource_vote_choices: 
[FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: TowardsIdentity(BjNejy4r9QAvLHpQ9Yq6yRMgNymeGZ46d48fJxJbMrfW), voters: [2oGomAQc47V9h3mkpyHUPbF74gT2AmoYKg1oSb94Rbwm:1, 4iroeiNBeBYZetCt21kW7FGyczE8WqoqzZ48YAHwyV7R:1, Cdf8V4KGHHd395x5xPJPPrzTKwmp5MqbuszSE2iMzzeP:1] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: TowardsIdentity(FiLk5pGtspYtF65PKsQq3YFr1DEiXPHTZeKjusT6DuqN), voters: [] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: TowardsIdentity(Fv8S6kTbNrRqKC7PR7XcRUoPR59bxNhhggg5mRaNN6ow), voters: [4MK8GWEWX1PturUqjZJefdE4WGrUqz1UQZnbK17ENkeA:1, 5gRudU7b4n8LYkNvhZomv6FtMrP7gvaTvRrHKfaTS22K:1, AfzQBrdwzDuTVdXrMWqQyVvXRWqPMDVjA76hViuGLh6W:1, E75wdFZB22P1uW1wJBJGPgXZuZKLotK7YmbH5wUk5msH:1, G3ZfS2v39x6FuLGnnJ1RNQyy4zn4Wb64KiGAjqj39wUu:1] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: Abstain, voters: [5Ur8tDxJnatfUd9gcVFDde7ptHydujZzJLNTxa6aMYYy:1, 93Gsg14oT9K4FLYmC7N26uS4g5b7JcM1GwGEDeJCCBPJ:1, 96eX4PTjbXRuGHuMzwXdptWFtHcboXbtevk51Jd73pP7:1, AE9xm2mbemDeMxPUzyt35Agq1axRxggVfV4DRLAZp7Qt:1, FbLyu5d7JxEsvSsujj7Wopg57Wrvz9HH3UULCusKpBnF:1, GsubMWb3LH1skUJrcxTmZ7wus1habJcbpb8su8yBVqFY:1, H9UrL7aWaxDmXhqeGMJy7LrGdT2wWb45mc7kQYsoqwuf:1, Hv88mzPZVKq2fnjoUqK56vjzkcmqRHpWE1ME4z1MXDrw:1] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: Lock, voters: [F1oA8iAoyJ8dgCAi2GSPqcNhp9xEuAqhP47yXBDw5QR:1, 2YSjsJUp74MJpm12rdn8wyPR5MY3c322pV8E8siw989u:1, 3fQrmN4PWhthUFnCFTaJqbT2PPGf7MytAyik4eY1DP8V:1, 7r7gnAiZunVLjtSd5ky4yvPpnWTFYbJuQAapg8kDCeNK:1, 86TUE89xNkBDcmshXRD198xjAvMmKecvHbwo6i83AmqA:1, 97iYr4cirPdG176kqa5nvJWT9tsnqxHmENfRnZUgM6SC:1, 99nKfYZL4spsTe9p9pPNhc1JWv9yq4CbPPMPm87a5sgn:1, BYAqFxCVwMKrw5YAQMCFQGiAF2v3YhKRm2EdGfgkYN9G:1, CGKeK3AfdZUxXF3qH9zxp5MR7Z4WvDVqMrU5wjMKqT5C:1, HRPPEX4mdoZAMkg6NLJUgDzN4pSTpiDXEAGcR5JBdiXX:1] }}], start_block: BlockInfo {{ time_ms: 3000, height: 0, core_height: 0, epoch: 0 }}, finalization_block: BlockInfo {{ time_ms: {}, height: 900, 
core_height: 42, epoch: 0 }}, winner: Locked }}], vote_poll_status: Locked, locked_count: 1 }}), unlocking is possible by paying 400000000000 credits", - time_after_distribution_limit - ); - - let _contender_4 = add_contender_to_dpns_name_contest( - &mut platform, - &platform_state, - 9, - "quantum", - Some(expected_error_message.as_str()), // this should fail, as it is locked - platform_version, - ); - } - - #[test] - fn test_document_creation_on_restricted_document_type_that_only_allows_contract_owner_to_create( - ) { - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_initial_state_structure(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (another_identity, another_identity_signer, another_identity_key) = - setup_identity(&mut platform, 450, dash_to_credits!(0.1)); - - let card_game_path = "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase-creation-restricted-to-owner.json"; - - let platform_state = platform.state.load(); - let platform_version = platform_state - .current_platform_version() - .expect("expected to get current platform version"); - - // let's construct the grovedb structure for the card game data contract - let mut contract = json_document_to_contract(card_game_path, true, platform_version) - .expect("expected to get data contract"); - - contract.set_owner_id(identity.id()); - - platform - .drive - .apply_contract( - &contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("expected to apply contract successfully"); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert_eq!( - card_document_type.creation_restriction_mode(), - CreationRestrictionMode::OwnerOnly - ); - - let mut rng = StdRng::seed_from_u64(433); - - let entropy = Bytes32::random_with_rng(&mut 
rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - // There is no issue because the creator of the contract made the document - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // Now let's try for another identity - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - another_identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 8.into()); - document.set("defense", 2.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - 
document.clone(), - card_document_type, - entropy.0, - &another_identity_key, - 2, - 0, - &another_identity_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - // There is no issue because the creator of the contract made the document - - assert_eq!(processing_result.invalid_paid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let result = processing_result.into_execution_results().remove(0); - - let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result - else { - panic!("expected a paid consensus error"); - }; - assert_eq!(consensus_error.to_string(), "Document Creation on 86LHvdC1Tqx5P97LQUSibGFqf2vnKFpB6VkqQ7oso86e:card is not allowed because of the document type's creation restriction mode Owner Only"); - } - } - - mod replacement_tests { - use super::*; - use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; - use dpp::identifier::Identifier; - use dpp::prelude::IdentityNonce; - use std::collections::BTreeMap; - - #[test] - fn test_document_replace_on_document_type_that_is_mutable() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - 
fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); 
- - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let documents_batch_update_transition = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition = documents_batch_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 1443820); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - fn perform_document_replace_on_profile_after_epoch_change( - original_name: &str, - new_names: Vec<(&str, StorageFlags)>, - ) { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut 
rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("displayName", original_name.into()); - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - for (i, (new_name, mut expected_flags)) in new_names.into_iter().enumerate() { - 
document.increment_revision().unwrap(); - document.set("displayName", new_name.into()); - - fast_forward_to_block( - &platform, - 500_000_000 + i as u64 * 1000, - 900 + i as u64, - 42, - 1 + i as u16, - true, - ); //less than a week - - let documents_batch_update_transition = - BatchTransition::new_document_replacement_transition_from_document( - document.clone(), - profile, - &key, - 3 + i as IdentityNonce, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition = - documents_batch_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let platform_state = platform.state.load(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!( - processing_result.valid_count(), - 1, - "{:?}", - processing_result.execution_results() - ); - - let drive_query = DriveDocumentQuery::new_primary_key_single_item_query( - &dashpay, - profile, - document.id(), - ); - - let mut documents = platform - .drive - .query_documents_with_flags(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - let (_first_document, storage_flags) = documents.remove(0); - - let storage_flags = storage_flags.expect("expected storage flags"); - - expected_flags.set_owner_id(identity.id().to_buffer()); - - 
assert_eq!(storage_flags, expected_flags); - } - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size() { - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![( - "Samuel", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 6)]), - Identifier::default().to_buffer(), - ), - )], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_smaller_size() { - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![( - "S", - StorageFlags::SingleEpochOwned(0, Identifier::default().to_buffer()), - )], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_same_size() { - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![( - "Max", - StorageFlags::SingleEpochOwned(0, Identifier::default().to_buffer()), - )], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_bigger_size( - ) { - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![ - ( - "Samuel", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 6)]), - Identifier::default().to_buffer(), - ), - ), - ( - "SamuelW", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 6), (2, 4)]), - Identifier::default().to_buffer(), - ), - ), - ], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_bigger_size_by_3_bytes( - ) { - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![ - ( - "Samuel", - 
StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 6)]), - Identifier::default().to_buffer(), - ), - ), - ( - "SamuelWes", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 6), (2, 6)]), - Identifier::default().to_buffer(), - ), - ), - ], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_smaller_size( - ) { - // In this case we start with the size Samuell Base epoch 0 epoch 1 added 7 bytes - // Then we try to update it to Sami Base epoch 2 - // Epoch 1 added 7 bytes is itself 3 bytes - // Sami is 3 bytes less than Samuell - // First iteration will say we should remove 6 bytes - // We need to start by calculating the cost of the original storage flags, in this case 5 bytes - // Then we need to calculate the cost of the new storage flags, in this case 2 bytes - // We should do the difference, then apply that difference in the combination function - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![ - ( - "Samuell", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 7)]), - Identifier::default().to_buffer(), - ), - ), - ( - "Sami", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 4)]), - Identifier::default().to_buffer(), - ), - ), - ], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_back_to_original( - ) { - perform_document_replace_on_profile_after_epoch_change( - "Sam", - vec![ - ( - "Samuel", - StorageFlags::MultiEpochOwned( - 0, - BTreeMap::from([(1, 6)]), - Identifier::default().to_buffer(), - ), - ), - ( - "Sam", - StorageFlags::SingleEpochOwned(0, Identifier::default().to_buffer()), - ), - ], - ); - } - - #[test] - fn test_document_replace_on_document_type_that_is_not_mutable() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng 
= StdRng::seed_from_u64(437); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (other_identity, ..) = setup_identity(&mut platform, 495, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let contact_request_document_type = dashpay_contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type"); - - assert!(!contact_request_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = contact_request_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set( - "toUserId", - Value::Identifier(other_identity.id().to_buffer()), - ); - document.set("recipientKeyIndex", Value::U32(1)); - document.set("senderKeyIndex", Value::U32(1)); - document.set("accountReference", Value::U32(0)); - - let mut altered_document = document.clone(); - - altered_document.set_revision(Some(1)); - altered_document.set("senderKeyIndex", Value::U32(2)); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - contact_request_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - 
&vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let documents_batch_update_transition = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - contact_request_document_type, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition = documents_batch_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 41880); - } - - #[test] - fn test_document_replace_on_document_type_that_is_not_mutable_but_is_transferable() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_transfer_only(Transferable::Always); - - let mut rng = StdRng::seed_from_u64(435); - - let 
platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, _, _) = setup_identity(&mut platform, 452, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - 
sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - document.set("attack", 6.into()); - document.set("defense", 0.into()); - - let documents_batch_transfer_transition = - BatchTransition::new_document_replacement_transition_from_document( - document, - card_document_type, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for transfer"); - - let documents_batch_transfer_serialized_transition = - documents_batch_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - 
.grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - } - - #[test] - fn test_document_replace_that_does_not_yet_exist() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = 
document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let documents_batch_update_transition = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition = documents_batch_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 516040); - } - - #[test] - fn test_double_document_replace() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract 
= dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let mut altered_document_2 = altered_document.clone(); - - altered_document_2.increment_revision().unwrap(); - altered_document_2.set("displayName", "Ody".into()); - altered_document_2.set("avatarUrl", "http://test.com/drapes.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - 
.unwrap() - .expect("expected to commit transaction"); - - let receiver_documents_sql_string = "select * from profile".to_string(); - - let query_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &dashpay, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); - - let documents_batch_update_transition_1 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_1 = - documents_batch_update_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_update_transition_2 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document_2, - profile, - &key, - 4, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_2 = - documents_batch_update_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = 
platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_update_serialized_transition_1.clone(), - documents_batch_update_serialized_transition_2.clone(), - ], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 2); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/drap[...(26)] displayName:string Ody publicMessage:string 8XG7KBGNvm2 "); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_double_document_replace_different_height_same_epoch() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); 
- - let mut rng = StdRng::seed_from_u64(433); - - fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let mut altered_document_2 = altered_document.clone(); - - altered_document_2.increment_revision().unwrap(); - altered_document_2.set("displayName", "Ody".into()); - altered_document_2.set("avatarUrl", "http://test.com/drapes.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - 
.process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let receiver_documents_sql_string = "select * from profile".to_string(); - - let query_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &dashpay, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); - - fast_forward_to_block(&platform, 1_400_000_000, 901, 43, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let documents_batch_update_transition_1 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_1 = - documents_batch_update_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let 
documents_batch_update_transition_2 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document_2, - profile, - &key, - 4, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_2 = - documents_batch_update_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition_1.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-17 04:53:20 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/cat.[...(23)] displayName:string Samuel publicMessage:string 8XG7KBGNvm2 "); - - fast_forward_to_block(&platform, 1_600_000_000, 902, 44, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let transaction = 
platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition_2.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-19 12:26:40 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/drap[...(26)] displayName:string Ody publicMessage:string 8XG7KBGNvm2 "); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_double_document_replace_no_change_different_height_same_epoch() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - 
fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - - let mut altered_document_2 = altered_document.clone(); - - altered_document_2.increment_revision().unwrap(); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - 
platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let receiver_documents_sql_string = "select * from profile".to_string(); - - let query_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &dashpay, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); - - fast_forward_to_block(&platform, 1_400_000_000, 901, 43, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let documents_batch_update_transition_1 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_1 = - documents_batch_update_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_update_transition_2 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document_2, - profile, - &key, - 4, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let 
documents_batch_update_serialized_transition_2 = - documents_batch_update_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition_1.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-17 04:53:20 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); - - fast_forward_to_block(&platform, 1_600_000_000, 902, 44, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition_2.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - 
.expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-19 12:26:40 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_double_document_replace_different_height_different_epoch() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - 
let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let mut altered_document_2 = altered_document.clone(); - - altered_document_2.increment_revision().unwrap(); - altered_document_2.set("displayName", "Ody".into()); - altered_document_2.set("avatarUrl", "http://test.com/drapes.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - 
.commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let receiver_documents_sql_string = "select * from profile".to_string(); - - let query_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &dashpay, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); - - fast_forward_to_block(&platform, 1_400_000_000, 901, 43, 1, false); //next epoch - - let platform_state = platform.state.load(); - - let documents_batch_update_transition_1 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_1 = - documents_batch_update_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_update_transition_2 = - BatchTransition::new_document_replacement_transition_from_document( - altered_document_2, - profile, - &key, - 4, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition_2 = - 
documents_batch_update_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition_1.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-17 04:53:20 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/cat.[...(23)] displayName:string Samuel publicMessage:string 8XG7KBGNvm2 "); - - fast_forward_to_block(&platform, 1_600_000_000, 905, 44, 2, true); //next epoch - - let platform_state = platform.state.load(); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition_2.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - 
.drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - let query_sender_results = platform - .drive - .query_documents(query_documents.clone(), None, false, None, None) - .expect("expected query result"); - - let document = query_sender_results - .documents() - .first() - .expect("expected a document"); - - assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-19 12:26:40 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/drap[...(26)] displayName:string Ody publicMessage:string 8XG7KBGNvm2 "); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - } - - mod deletion_tests { - use super::*; - - #[test] - fn test_document_delete_on_document_type_that_is_mutable_and_can_be_deleted() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - 
.document_type_for_name("profile") - .expect("expected a profile document type"); - - assert!(profile.documents_mutable()); - - assert!(profile.documents_can_be_deleted()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - profile, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let documents_batch_deletion_transition = - BatchTransition::new_document_deletion_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - 
None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition = documents_batch_deletion_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 1711420); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_document_delete_on_document_type_that_is_mutable_and_can_not_be_deleted() { - let mut platform = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure(); - - let contract_path = "tests/supporting_files/contract/dashpay/dashpay-contract-contact-request-mutable-and-can-not-be-deleted.json"; - - let platform_state = platform.state.load(); - let platform_version = platform_state - .current_platform_version() - .expect("expected to get current platform version"); - - // let's construct the grovedb structure for the card game data contract - let dashpay_contract = 
json_document_to_contract(contract_path, true, platform_version) - .expect("expected to get data contract"); - platform - .drive - .apply_contract( - &dashpay_contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("expected to apply contract successfully"); - - let mut rng = StdRng::seed_from_u64(437); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (other_identity, ..) = setup_identity(&mut platform, 495, dash_to_credits!(0.1)); - - let contact_request_document_type = dashpay_contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type"); - - assert!(contact_request_document_type.documents_mutable()); - - assert!(!contact_request_document_type.documents_can_be_deleted()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = contact_request_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set( - "toUserId", - Value::Identifier(other_identity.id().to_buffer()), - ); - document.set("recipientKeyIndex", Value::U32(1)); - document.set("senderKeyIndex", Value::U32(1)); - document.set("accountReference", Value::U32(0)); - - let mut altered_document = document.clone(); - - altered_document.set_revision(Some(1)); - altered_document.set("senderKeyIndex", Value::U32(2)); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - contact_request_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = 
documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let documents_batch_deletion_transition = - BatchTransition::new_document_deletion_transition_from_document( - altered_document, - contact_request_document_type, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_deletion_serialized_transition = - documents_batch_deletion_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_deletion_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); - } - - #[test] - fn test_document_delete_on_document_type_that_is_not_mutable_and_can_be_deleted() { - let mut 
platform = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure(); - - let contract_path = "tests/supporting_files/contract/dashpay/dashpay-contract-contact-request-not-mutable-and-can-be-deleted.json"; - - let platform_state = platform.state.load(); - let platform_version = platform_state - .current_platform_version() - .expect("expected to get current platform version"); - - // let's construct the grovedb structure for the card game data contract - let dashpay_contract = json_document_to_contract(contract_path, true, platform_version) - .expect("expected to get data contract"); - platform - .drive - .apply_contract( - &dashpay_contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("expected to apply contract successfully"); - - let mut rng = StdRng::seed_from_u64(437); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (other_identity, ..) 
= setup_identity(&mut platform, 495, dash_to_credits!(0.1)); - - let contact_request_document_type = dashpay_contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type"); - - assert!(!contact_request_document_type.documents_mutable()); - - assert!(contact_request_document_type.documents_can_be_deleted()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = contact_request_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set( - "toUserId", - Value::Identifier(other_identity.id().to_buffer()), - ); - document.set("recipientKeyIndex", Value::U32(1)); - document.set("senderKeyIndex", Value::U32(1)); - document.set("accountReference", Value::U32(0)); - - let mut altered_document = document.clone(); - - altered_document.set_revision(Some(1)); - altered_document.set("senderKeyIndex", Value::U32(2)); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - contact_request_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform 
- .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let documents_batch_deletion_transition = - BatchTransition::new_document_deletion_transition_from_document( - altered_document, - contact_request_document_type, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_deletion_serialized_transition = - documents_batch_deletion_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_deletion_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 2762400); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_document_delete_on_document_type_that_is_not_mutable_and_can_not_be_deleted() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = 
StdRng::seed_from_u64(437); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (other_identity, ..) = setup_identity(&mut platform, 495, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let contact_request_document_type = dashpay_contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type"); - - assert!(!contact_request_document_type.documents_mutable()); - - assert!(!contact_request_document_type.documents_can_be_deleted()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = contact_request_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set( - "toUserId", - Value::Identifier(other_identity.id().to_buffer()), - ); - document.set("recipientKeyIndex", Value::U32(1)); - document.set("senderKeyIndex", Value::U32(1)); - document.set("accountReference", Value::U32(0)); - - let mut altered_document = document.clone(); - - altered_document.set_revision(Some(1)); - altered_document.set("senderKeyIndex", Value::U32(2)); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document, - contact_request_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - 
.platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let documents_batch_deletion_transition = - BatchTransition::new_document_deletion_transition_from_document( - altered_document, - contact_request_document_type, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_deletion_serialized_transition = - documents_batch_deletion_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_deletion_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); - } - - #[test] - fn test_document_delete_that_does_not_yet_exist() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = 
platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); - let dashpay_contract = dashpay.clone(); - - let profile = dashpay_contract - .document_type_for_name("profile") - .expect("expected a profile document type"); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = profile - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("avatarUrl", "http://test.com/bob.jpg".into()); - - let mut altered_document = document.clone(); - - altered_document.increment_revision().unwrap(); - altered_document.set("displayName", "Samuel".into()); - altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); - - let documents_batch_delete_transition = - BatchTransition::new_document_deletion_transition_from_document( - altered_document, - profile, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_delete_serialized_transition = documents_batch_delete_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_delete_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - 
assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 516040); - } - } - - mod transfer_tests { - use super::*; - - #[test] - fn test_document_transfer_on_document_type_that_is_transferable_that_has_no_owner_indices() - { - let mut platform = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure(); - - let card_game_path = "tests/supporting_files/contract/crypto-card-game/crypto-card-game-all-transferable-no-owner-indexes.json"; - - let platform_state = platform.state.load(); - let platform_version = platform_state - .current_platform_version() - .expect("expected to get current platform version"); - - // let's construct the grovedb structure for the card game data contract - let contract = json_document_to_contract(card_game_path, true, platform_version) - .expect("expected to get data contract"); - platform - .drive - .apply_contract( - &contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("expected to apply contract successfully"); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, _, _) = setup_identity(&mut platform, 450, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 
4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - document.set_revision(Some(2)); - - let documents_batch_transfer_transition = - BatchTransition::new_document_transfer_transition_from_document( - document, - card_document_type, - receiver.id(), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for transfer"); - - let documents_batch_transfer_serialized_transition = - documents_batch_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - 
.commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 0); // There is no storage fee, as there are no indexes that will change - - assert_eq!(processing_result.aggregated_fees().processing_fee, 1985420); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_document_transfer_on_document_type_that_is_transferable() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_transfer_only(Transferable::Always); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, _, _) = setup_identity(&mut platform, 450, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 
7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) 
- .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_transfer_transition = - BatchTransition::new_document_transfer_transition_from_document( - document, - card_document_type, - receiver.id(), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for transfer"); - - let documents_batch_transfer_serialized_transition = - documents_batch_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 37341000); // 1383 bytes added - - // todo: we should expect these numbers to be closer - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - Some(14992395) - ); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 3369260); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = 
platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to have no documents, and the receiver to have 1 - assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 1); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - - #[test] - fn test_document_transfer_on_document_type_that_is_not_transferable() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_transfer_only(Transferable::Never); - - let mut rng = StdRng::seed_from_u64(435); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, _, _) = setup_identity(&mut platform, 452, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - 
&signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - 
assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_transfer_transition = - BatchTransition::new_document_transfer_transition_from_document( - document, - card_document_type, - receiver.id(), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for transfer"); - - let documents_batch_transfer_serialized_transition = - documents_batch_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - } - - #[test] - fn test_document_transfer_that_does_not_yet_exist() { - let platform_version = PlatformVersion::latest(); - let (mut 
platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_transfer_only(Transferable::Never); - - let mut rng = StdRng::seed_from_u64(435); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, _, _) = setup_identity(&mut platform, 452, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We 
expect the sender to have 0 documents, and the receiver to also have none - assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_transfer_transition = - BatchTransition::new_document_transfer_transition_from_document( - document, - card_document_type, - receiver.id(), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for transfer"); - - let documents_batch_transfer_serialized_transition = - documents_batch_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 36200); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to still have no document, and the receiver to have none as well - assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 0); 
- } - - #[test] - fn test_document_delete_after_transfer() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_transfer_only(Transferable::Always); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected 
to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_transfer_transition = - BatchTransition::new_document_transfer_transition_from_document( - document.clone(), - card_document_type, - receiver.id(), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for transfer"); - - let documents_batch_transfer_serialized_transition = - documents_batch_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = 
platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 3730120); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to have no documents, and the receiver to have 1 - assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 1); - - // Now let's try to delete the transferred document - - document.set_owner_id(receiver.id()); - - let documents_batch_deletion_transition = - BatchTransition::new_document_deletion_transition_from_document( - document, - card_document_type, - &recipient_key, - 2, - 0, - &recipient_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_deletion_serialized_transition = - documents_batch_deletion_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - 
&vec![documents_batch_deletion_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 571240); - - let issues = platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); - - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); - } - } - - mod nft_tests { - use super::*; - use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; - #[test] - fn test_document_set_price_on_document_without_ability_to_purchase() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_transfer_only(Transferable::Always); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - 
DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - 
document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let result = processing_result.into_execution_results().remove(0); - - let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result - else { - panic!("expected a paid consensus error"); - }; - assert_eq!(consensus_error.to_string(), "Document transition action card is in trade mode No Trading that does not support the seller setting the price is not supported"); - } - - #[test] - fn test_document_set_price() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, _, _) = setup_identity(&mut platform, 450, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - 
assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - 
receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - 
assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // The sender document should have the desired price - - let document = query_sender_results.documents().first().unwrap(); - - let price: Credits = document - .properties() - .get_integer("$price") - .expect("expected to get back price"); - - assert_eq!(dash_to_credits!(0.1), price); - - assert_eq!(document.revision(), Some(2)); - } - - #[test] - fn test_document_set_price_and_purchase() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (purchaser, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(1.0)); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - assert_eq!(seller_balance, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = 
Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!( - processing_result - .aggregated_fees() - .clone() - .into_balance_change(identity.id()) - .change(), - &BalanceChange::RemoveFromBalance { - required_removed_balance: 123579000, - desired_removed_balance: 126435860, - } - ); - - let original_creation_cost = 126435860; - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing 
fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - original_creation_cost - ); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", purchaser.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - 
.process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 216000); // we added 8 bytes for the price - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - None - ); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - original_creation_cost - 2689880 - ); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // The sender document should have the desired price - - let mut document = 
query_sender_results.documents_owned().remove(0); - - let price: Credits = document - .properties() - .get_integer("$price") - .expect("expected to get back price"); - - assert_eq!(dash_to_credits!(0.1), price); - - // At this point we want to have the receiver purchase the document - - document.set_revision(Some(3)); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - purchaser.id(), - dash_to_credits!(0.1), //same price as requested - &recipient_key, - 1, // 1 because he's never done anything - 0, - &recipient_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 64611000); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 4080480); - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - Some(22704503) - ); - - let query_sender_results = platform - .drive - 
.query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to have no documents, and the receiver to have 1 - assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 1); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.2) - original_creation_cost + 20014623 - ); - - let buyers_balance = platform - .drive - .fetch_identity_balance(purchaser.id().to_buffer(), None, platform_version) - .expect("expected to get purchaser balance") - .expect("expected that purchaser exists"); - - // the buyer paid 0.1, but also storage and processing fees - assert_eq!(buyers_balance, dash_to_credits!(0.9) - 68691480); - } - - #[test] - fn test_document_set_price_and_purchase_different_epoch_documents_mutable() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure(); - - let card_game_path = "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase-documents-mutable.json"; - - // let's construct the grovedb structure for the card game data contract - let contract = json_document_to_contract(card_game_path, true, platform_version) - .expect("expected to get data contract"); - platform - .drive - .apply_contract( - &contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("expected to 
apply contract successfully"); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (purchaser, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(1.0)); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - assert_eq!(seller_balance, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - 
.expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!( - processing_result - .aggregated_fees() - .clone() - .into_balance_change(identity.id()) - .change(), - &BalanceChange::RemoveFromBalance { - required_removed_balance: 138159000, - desired_removed_balance: 141234660, - } - ); - - let original_creation_cost = 141234660; - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - original_creation_cost - ); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", purchaser.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - 
assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // now let's modify the document - - fast_forward_to_block(&platform, 500_000, 100, 3, 1, false); //next epoch - - document.set("description", "chopsticks".into()); - document.bump_revision(); - - let documents_batch_update_transition = - BatchTransition::new_document_replacement_transition_from_document( - document.clone(), - card_document_type, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_update_serialized_transition = documents_batch_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let platform_state = platform.state.load(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_update_serialized_transition.clone()], - &platform_state, - platform_state.last_block_info(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!( - processing_result.invalid_paid_count(), - 0, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!( - processing_result.invalid_unpaid_count(), - 0, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!( - processing_result.valid_count(), - 1, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 378000); - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - None - ); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 2717400); - - let seller_balance = 
platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - original_creation_cost - 2717400 - 378000 - ); - - // now let's update price, but first go to next epoch - - fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 2, false); //next epoch - - document.bump_revision(); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 4, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!( - processing_result.invalid_paid_count(), - 0, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!( - processing_result.invalid_unpaid_count(), - 0, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 216000); // we added 8 bytes for the 
price - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - None - ); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 2721160); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - - original_creation_cost - - 2717400 - - 378000 - - 2721160 - - 216000 - ); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // The sender document should have the desired price - - let mut document = query_sender_results.documents_owned().remove(0); - - let price: Credits = document - .properties() - .get_integer("$price") - .expect("expected to get back price"); - - assert_eq!(dash_to_credits!(0.1), price); - - // At this point we want to have the receiver purchase the document at the next epoch - - fast_forward_to_block(&platform, 1_700_000_000, 1200, 42, 3, false); //next epoch - - document.bump_revision(); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - purchaser.id(), - dash_to_credits!(0.1), //same price as requested - &recipient_key, - 1, // 
1 because he's never done anything - 0, - &recipient_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!( - processing_result.invalid_paid_count(), - 0, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!( - processing_result.invalid_unpaid_count(), - 0, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!( - processing_result.valid_count(), - 1, - "{:?}", - processing_result.execution_results() - ); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 64611000); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 4345280); - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - Some(52987722) - ); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to have no documents, and the receiver to have 1 - assert_eq!(query_sender_results.documents().len(), 0); - - 
assert_eq!(query_receiver_results.documents().len(), 1); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.2) - original_creation_cost + 46955162 - ); - - let buyers_balance = platform - .drive - .fetch_identity_balance(purchaser.id().to_buffer(), None, platform_version) - .expect("expected to get purchaser balance") - .expect("expected that purchaser exists"); - - // the buyer paid 0.1, but also storage and processing fees - assert_eq!(buyers_balance, dash_to_credits!(0.9) - 68956280); - } - - #[test] - fn test_document_set_price_and_purchase_different_epoch() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (purchaser, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(1.0)); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - assert_eq!(seller_balance, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( 
- &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!( - processing_result - .aggregated_fees() - .clone() - .into_balance_change(identity.id()) - .change(), - &BalanceChange::RemoveFromBalance { - required_removed_balance: 123579000, - desired_removed_balance: 126435860, - } - ); - - let original_creation_cost = 126435860; - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - original_creation_cost - ); - - let sender_documents_sql_string = - 
format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", purchaser.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // now let's update price, but first go to next epoch - - fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - 
.process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 216000); // we added 8 bytes for the price - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - None - ); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.1) - original_creation_cost - 2689880 - ); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // The sender document should have the desired price - - let mut document = 
query_sender_results.documents_owned().remove(0); - - let price: Credits = document - .properties() - .get_integer("$price") - .expect("expected to get back price"); - - assert_eq!(dash_to_credits!(0.1), price); - - // At this point we want to have the receiver purchase the document at the next epoch - - fast_forward_to_block(&platform, 1_700_000_000, 1200, 42, 2, false); //next epoch - - document.set_revision(Some(3)); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - purchaser.id(), - dash_to_credits!(0.1), //same price as requested - &recipient_key, - 1, // 1 because he's never done anything - 0, - &recipient_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().storage_fee, 64611000); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 4080480); - - assert_eq!( - processing_result - .aggregated_fees() - .fee_refunds - .calculate_refunds_amount_for_identity(identity.id()), - 
Some(22704503) - ); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents(query_receiver_identity_documents, None, false, None, None) - .expect("expected query result"); - - // We expect the sender to have no documents, and the receiver to have 1 - assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 1); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee - assert_eq!( - seller_balance, - dash_to_credits!(0.2) - original_creation_cost + 20014623 - ); - - let buyers_balance = platform - .drive - .fetch_identity_balance(purchaser.id().to_buffer(), None, platform_version) - .expect("expected to get purchaser balance") - .expect("expected that purchaser exists"); - - // the buyer paid 0.1, but also storage and processing fees - assert_eq!(buyers_balance, dash_to_credits!(0.9) - 68691480); - } - - #[test] - fn test_document_set_price_and_try_purchase_at_different_amount() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (purchaser, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(1.0)); - - let seller_balance = platform - .drive - 
.fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - assert_eq!(seller_balance, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - 
document.clone(), - card_document_type, - dash_to_credits!(0.5), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 1); - - // At this point we want to have the receiver purchase the document - - document.set_revision(Some(3)); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - purchaser.id(), - dash_to_credits!(0.35), //different than requested price - &recipient_key, - 1, // 1 because he's never done anything - 0, - &recipient_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - 
platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - let result = processing_result.into_execution_results().remove(0); - - let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result - else { - panic!("expected a paid consensus error"); - }; - assert_eq!(consensus_error.to_string(), "5rJccTdtJfg6AxSKyrptWUug3PWjveEitTTLqBn9wHdk document can not be purchased for 35000000000, it's sale price is 50000000000 (in credits)"); - } - - #[test] - fn test_document_set_price_and_purchase_from_ones_self() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - assert_eq!(seller_balance, dash_to_credits!(0.5)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - 
document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state 
transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 1); - - // At this point we want to have the receiver purchase the document - - document.set_revision(Some(3)); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - identity.id(), - dash_to_credits!(0.1), //same price as requested - &key, - 1, // 1 because he's never done anything - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - let result = processing_result.into_execution_results().remove(0); - - let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result - else { - panic!("expected a paid consensus error"); - }; - assert_eq!(consensus_error.to_string(), "Document transition action on document type: card identity trying to purchase a document that is already owned by the purchaser is not supported"); - } - - #[test] - fn test_document_set_price_and_purchase_then_try_buy_back() { - // In this test we try to buy back a document after it 
has been sold - - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (purchaser, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(1.0)); - - let seller_balance = platform - .drive - .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) - .expect("expected to get identity balance") - .expect("expected that identity exists"); - - assert_eq!(seller_balance, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - 
.process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 1); - - // At this point we want to have the receiver purchase the document - - document.set_revision(Some(3)); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - purchaser.id(), - dash_to_credits!(0.1), //same price as requested - &recipient_key, - 1, // 1 because he's never done anything - 0, - &recipient_signer, - platform_version, - None, - 
None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 1); - - // Let's verify some stuff - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", purchaser.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - 
assert_eq!(query_sender_results.documents().len(), 0); - - assert_eq!(query_receiver_results.documents().len(), 1); - - // The sender document should have the desired price - - let mut document = query_receiver_results.documents_owned().remove(0); - - let price: Option<Credits> = document - .properties() - .get_optional_integer("$price") - .expect("expected to get back price"); - - assert_eq!(price, None); - - assert_eq!(document.owner_id(), purchaser.id()); - - // At this point we want to have the sender to try to buy back the document - - document.set_revision(Some(4)); - - let documents_batch_purchase_transition = - BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - identity.id(), - dash_to_credits!(0.1), //same price as old requested - &key, - 4, // 1 because he's never done anything - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - let result = processing_result.into_execution_results().remove(0); - - let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result - else { - panic!("expected a paid consensus error"); - }; - assert_eq!( - consensus_error.to_string(), - 
"5rJccTdtJfg6AxSKyrptWUug3PWjveEitTTLqBn9wHdk document not for sale" - ); - } - - #[test] - fn test_document_set_price_and_purchase_with_enough_credits_to_buy_but_not_enough_to_pay_for_processing( - ) { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (receiver, recipient_signer, recipient_key) = - setup_identity(&mut platform, 450, dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - 
&vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let receiver_documents_sql_string = - format!("select * from card where $ownerId == '{}'", receiver.id()); - - let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( - receiver_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to have 1 document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - document.set_revision(Some(2)); - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let 
documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 1); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); - - let query_sender_results = platform - .drive - .query_documents( - query_sender_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - let query_receiver_results = platform - .drive - .query_documents( - query_receiver_identity_documents.clone(), - None, - false, - None, - None, - ) - .expect("expected query result"); - - // We expect the sender to still have their document, and the receiver to have none - assert_eq!(query_sender_results.documents().len(), 1); - - assert_eq!(query_receiver_results.documents().len(), 0); - - // The sender document should have the desired price - - let mut document = query_sender_results.documents_owned().remove(0); - - let price: Credits = document - .properties() - .get_integer("$price") - .expect("expected to get back price"); - - assert_eq!(dash_to_credits!(0.1), price); - - // At this point we want to have the receiver purchase the document - - document.set_revision(Some(3)); - - let documents_batch_purchase_transition = - 
BatchTransition::new_document_purchase_transition_from_document( - document.clone(), - card_document_type, - receiver.id(), - dash_to_credits!(0.1), //same price as requested - &recipient_key, - 1, // 1 because he's never done anything - 0, - &recipient_signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the purchase"); - - let documents_batch_purchase_serialized_transition = - documents_batch_purchase_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_purchase_serialized_transition], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // nothing can go through because the purchaser doesn't have enough balance - - assert_eq!(processing_result.invalid_paid_count(), 0); - - assert_eq!(processing_result.invalid_unpaid_count(), 1); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 0); - } - - #[test] - fn test_document_set_price_on_not_owned_document() { - let platform_version = PlatformVersion::latest(); - let (mut platform, contract) = TestPlatformBuilder::new() - .build_with_mock_rpc() - .set_initial_state_structure() - .with_crypto_card_game_nft(TradeMode::DirectPurchase); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); - - let (other_identity, other_identity_signer, other_identity_key) = - setup_identity(&mut platform, 450, 
dash_to_credits!(0.1)); - - let card_document_type = contract - .document_type_for_name("card") - .expect("expected a profile document type"); - - assert!(!card_document_type.documents_mutable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut document = card_document_type - .random_document_with_identifier_and_entropy( - &mut rng, - identity.id(), - entropy, - DocumentFieldFillType::DoNotFillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document.set("attack", 4.into()); - document.set("defense", 7.into()); - - let documents_batch_create_transition = - BatchTransition::new_document_creation_transition_from_document( - document.clone(), - card_document_type, - entropy.0, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_eq!(processing_result.valid_count(), 1); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - document.set_revision(Some(2)); - - document.set_owner_id(other_identity.id()); // we do this to trick the system - - let documents_batch_update_price_transition = - BatchTransition::new_document_update_price_transition_from_document( - document.clone(), - card_document_type, - dash_to_credits!(0.1), - &other_identity_key, - 1, - 0, - &other_identity_signer, 
- platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition for the update price"); - - let documents_batch_transfer_serialized_transition = - documents_batch_update_price_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default_with_time(50000000), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.invalid_paid_count(), 1); - - assert_eq!(processing_result.invalid_unpaid_count(), 0); - - assert_eq!(processing_result.valid_count(), 0); - - assert_eq!(processing_result.aggregated_fees().processing_fee, 36200); - - let sender_documents_sql_string = - format!("select * from card where $ownerId == '{}'", identity.id()); - - let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( - sender_documents_sql_string.as_str(), - &contract, - Some(&platform.config.drive), - ) - .expect("expected document query"); - - let query_sender_results = platform - .drive - .query_documents(query_sender_identity_documents, None, false, None, None) - .expect("expected query result"); - - // The sender document should not have the desired price - - let document = query_sender_results.documents().first().unwrap(); - - assert_eq!( - document - .properties() - .get_optional_integer::<Credits>("$price") - .expect("expected None"), - None - ); - } - } - - mod dpns_tests { - use super::*; - use crate::execution::validation::state_transition::tests::setup_identity; - use crate::test::helpers::setup::TestPlatformBuilder; - use dpp::dash_to_credits; 
- use dpp::data_contract::document_type::random_document::{ - DocumentFieldFillSize, DocumentFieldFillType, - }; - use dpp::data_contract::DataContract; - use dpp::platform_value::Bytes32; - use dpp::state_transition::batch_transition::BatchTransition; - use dpp::util::hash::hash_double; - use drive::query::{InternalClauses, OrderClause, WhereClause, WhereOperator}; - use drive::util::test_helpers::setup_contract; - use indexmap::IndexMap; - use platform_version::version::PlatformVersion; - use rand::prelude::StdRng; - use std::collections::BTreeMap; - - #[test] - fn test_dpns_contract_references_with_no_contested_unique_index() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity_1, signer_1, key_1) = - setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, 93, dash_to_credits!(0.5)); - - let (identity_3, signer_3, key_3) = - setup_identity(&mut platform, 98, dash_to_credits!(0.5)); - - let dashpay_contract = setup_contract( - &platform.drive, - "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", - None, - None, - None::<fn(&mut DataContract)>, - None, - None, - ); - - let card_game = setup_contract( - &platform.drive, - "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase.json", - None, - None, - None::<fn(&mut DataContract)>, - None, - None, - ); - - let dpns_contract = setup_contract( - &platform.drive, - "tests/supporting_files/contract/dpns/dpns-contract-contested-unique-index-with-contract-id.json", - None, - None, - None::<fn(&mut DataContract)>, - None, - None, - ); - - let preorder = dpns_contract - .document_type_for_name("preorder") - .expect("expected a profile document type"); - - assert!(!preorder.documents_mutable()); - 
assert!(preorder.documents_can_be_deleted()); - assert!(!preorder.documents_transferable().is_transferable()); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - assert!(!domain.documents_mutable()); - // Deletion is disabled with data trigger - assert!(domain.documents_can_be_deleted()); - assert!(domain.documents_transferable().is_transferable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut preorder_document_2 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut preorder_document_3 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_3.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_2 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_3 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - 
identity_3.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document_1.set("parentDomainName", "dash".into()); - document_1.set("normalizedParentDomainName", "dash".into()); - document_1.set("label", "quantum123".into()); - document_1.set("normalizedLabel", "quantum123".into()); - document_1.set("records.contract", dashpay_contract.id().into()); - document_1.set("subdomainRules.allowSubdomains", false.into()); - - document_2.set("parentDomainName", "dash".into()); - document_2.set("normalizedParentDomainName", "dash".into()); - document_2.set("label", "van89".into()); - document_2.set("normalizedLabel", "van89".into()); - document_2.set("records.contract", card_game.id().into()); - document_2.set("subdomainRules.allowSubdomains", false.into()); - - document_3.set("parentDomainName", "dash".into()); - document_3.set("normalizedParentDomainName", "dash".into()); - document_3.set("label", "jazz65".into()); - document_3.set("normalizedLabel", "jazz65".into()); - document_3.set("records.identity", document_3.owner_id().into()); - document_3.set("subdomainRules.allowSubdomains", false.into()); - - let salt_1: [u8; 32] = rng.gen(); - let salt_2: [u8; 32] = rng.gen(); - let salt_3: [u8; 32] = rng.gen(); - - let mut salted_domain_buffer_1: Vec<u8> = vec![]; - salted_domain_buffer_1.extend(salt_1); - salted_domain_buffer_1.extend("quantum123.dash".as_bytes()); - - let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); - - let mut salted_domain_buffer_2: Vec<u8> = vec![]; - salted_domain_buffer_2.extend(salt_2); - salted_domain_buffer_2.extend("van89.dash".as_bytes()); - - let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); - - let mut salted_domain_buffer_3: Vec<u8> = vec![]; - salted_domain_buffer_3.extend(salt_3); - salted_domain_buffer_3.extend("jazz65.dash".as_bytes()); - - let salted_domain_hash_3 = hash_double(salted_domain_buffer_3); 
- - preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); - preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); - preorder_document_3.set("saltedDomainHash", salted_domain_hash_3.into()); - - document_1.set("preorderSalt", salt_1.into()); - document_2.set("preorderSalt", salt_2.into()); - document_3.set("preorderSalt", salt_3.into()); - - let documents_batch_create_preorder_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_1, - preorder, - entropy.0, - &key_1, - 2, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_1 = - documents_batch_create_preorder_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_2, - preorder, - entropy.0, - &key_2, - 2, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_2 = - documents_batch_create_preorder_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_3 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_3, - preorder, - entropy.0, - &key_3, - 2, - 0, - &signer_3, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_3 = - documents_batch_create_preorder_transition_3 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_1 = - 
BatchTransition::new_document_creation_transition_from_document( - document_1, - domain, - entropy.0, - &key_1, - 3, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_1 = - documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - document_2, - domain, - entropy.0, - &key_2, - 3, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_2 = - documents_batch_create_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_3 = - BatchTransition::new_document_creation_transition_from_document( - document_3.clone(), - domain, - entropy.0, - &key_3, - 3, - 0, - &signer_3, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_3 = - documents_batch_create_transition_3 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_preorder_transition_1.clone(), - documents_batch_create_serialized_preorder_transition_2.clone(), - documents_batch_create_serialized_preorder_transition_3.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to 
commit transaction"); - - assert_eq!(processing_result.valid_count(), 3); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_transition_1.clone(), - documents_batch_create_serialized_transition_2.clone(), - documents_batch_create_serialized_transition_3.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 3); - - let mut order_by = IndexMap::new(); - - order_by.insert( - "records.identity".to_string(), - OrderClause { - field: "records.identity".to_string(), - ascending: true, - }, - ); - - let drive_query = DriveDocumentQuery { - contract: &dpns_contract, - document_type: domain, - internal_clauses: InternalClauses { - primary_key_in_clause: None, - primary_key_equal_clause: None, - in_clause: None, - range_clause: Some(WhereClause { - field: "records.identity".to_string(), - operator: WhereOperator::LessThanOrEquals, - value: Value::Bytes32([255; 32]), - }), - equal_clauses: Default::default(), - }, - offset: None, - limit: None, - order_by, - start_at: None, - start_at_included: false, - block_time_ms: None, - }; - - let documents = platform - .drive - .query_documents(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - let transient_fields = domain - .transient_fields() - .iter() - .map(|a| a.as_str()) - .collect(); - - assert!(documents - .get(0) - .expect("expected a document") - .is_equal_ignoring_time_based_fields( - &document_3, - Some(transient_fields), - platform_version - ) - .expect("expected to run is equal")); - - let drive_query = DriveDocumentQuery { - contract: 
&dpns_contract, - document_type: domain, - internal_clauses: InternalClauses { - primary_key_in_clause: None, - primary_key_equal_clause: None, - in_clause: None, - range_clause: None, - equal_clauses: BTreeMap::from([( - "records.identity".to_string(), - WhereClause { - field: "records.identity".to_string(), - operator: WhereOperator::Equal, - value: Value::Null, - }, - )]), - }, - offset: None, - limit: None, - order_by: Default::default(), - start_at: None, - start_at_included: false, - block_time_ms: None, - }; - - let documents = platform - .drive - .query_documents(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - // This is normal because we set that we could not query on null - assert_eq!(documents.len(), 0); - } - - #[test] - fn test_dpns_contract_references_with_no_contested_unique_index_null_searchable_true() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(433); - - let platform_state = platform.state.load(); - - let (identity_1, signer_1, key_1) = - setup_identity(&mut platform, 958, dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, 93, dash_to_credits!(0.5)); - - let (identity_3, signer_3, key_3) = - setup_identity(&mut platform, 98, dash_to_credits!(0.5)); - - let dashpay_contract = setup_contract( - &platform.drive, - "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", - None, - None, - None::, - None, - None, - ); - - let card_game = setup_contract( - &platform.drive, - "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase.json", - None, - None, - None::, - None, - None, - ); - - let dpns_contract = setup_contract( - &platform.drive, - 
"tests/supporting_files/contract/dpns/dpns-contract-contested-unique-index-with-contract-id-null-searchable-true.json", - None, - None, - None::, - None, - None, - ); - - let preorder = dpns_contract - .document_type_for_name("preorder") - .expect("expected a profile document type"); - - assert!(!preorder.documents_mutable()); - assert!(preorder.documents_can_be_deleted()); - assert!(!preorder.documents_transferable().is_transferable()); - - let domain = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type"); - - assert!(!domain.documents_mutable()); - // Deletion is disabled with data trigger - assert!(domain.documents_can_be_deleted()); - assert!(domain.documents_transferable().is_transferable()); - - let entropy = Bytes32::random_with_rng(&mut rng); - - let mut preorder_document_1 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut preorder_document_2 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut preorder_document_3 = preorder - .random_document_with_identifier_and_entropy( - &mut rng, - identity_3.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_1 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_1.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_2 = domain - 
.random_document_with_identifier_and_entropy( - &mut rng, - identity_2.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - let mut document_3 = domain - .random_document_with_identifier_and_entropy( - &mut rng, - identity_3.id(), - entropy, - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - platform_version, - ) - .expect("expected a random document"); - - document_1.set("parentDomainName", "dash".into()); - document_1.set("normalizedParentDomainName", "dash".into()); - document_1.set("label", "quantum123".into()); - document_1.set("normalizedLabel", "quantum123".into()); - document_1.set("records.contract", dashpay_contract.id().into()); - document_1.set("subdomainRules.allowSubdomains", false.into()); - - document_2.set("parentDomainName", "dash".into()); - document_2.set("normalizedParentDomainName", "dash".into()); - document_2.set("label", "van89".into()); - document_2.set("normalizedLabel", "van89".into()); - document_2.set("records.contract", card_game.id().into()); - document_2.set("subdomainRules.allowSubdomains", false.into()); - - document_3.set("parentDomainName", "dash".into()); - document_3.set("normalizedParentDomainName", "dash".into()); - document_3.set("label", "jazz65".into()); - document_3.set("normalizedLabel", "jazz65".into()); - document_3.set("records.identity", document_3.owner_id().into()); - document_3.set("subdomainRules.allowSubdomains", false.into()); - - let salt_1: [u8; 32] = rng.gen(); - let salt_2: [u8; 32] = rng.gen(); - let salt_3: [u8; 32] = rng.gen(); - - let mut salted_domain_buffer_1: Vec = vec![]; - salted_domain_buffer_1.extend(salt_1); - salted_domain_buffer_1.extend("quantum123.dash".as_bytes()); - - let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); - - let mut salted_domain_buffer_2: Vec = vec![]; - salted_domain_buffer_2.extend(salt_2); - 
salted_domain_buffer_2.extend("van89.dash".as_bytes()); - - let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); - - let mut salted_domain_buffer_3: Vec = vec![]; - salted_domain_buffer_3.extend(salt_3); - salted_domain_buffer_3.extend("jazz65.dash".as_bytes()); - - let salted_domain_hash_3 = hash_double(salted_domain_buffer_3); - - preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); - preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); - preorder_document_3.set("saltedDomainHash", salted_domain_hash_3.into()); - - document_1.set("preorderSalt", salt_1.into()); - document_2.set("preorderSalt", salt_2.into()); - document_3.set("preorderSalt", salt_3.into()); - - let documents_batch_create_preorder_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_1, - preorder, - entropy.0, - &key_1, - 2, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_1 = - documents_batch_create_preorder_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_2, - preorder, - entropy.0, - &key_2, - 2, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_2 = - documents_batch_create_preorder_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_preorder_transition_3 = - BatchTransition::new_document_creation_transition_from_document( - preorder_document_3, - preorder, - entropy.0, - &key_3, - 2, - 0, - &signer_3, - platform_version, - None, - None, - None, - ) - 
.expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_preorder_transition_3 = - documents_batch_create_preorder_transition_3 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_1 = - BatchTransition::new_document_creation_transition_from_document( - document_1, - domain, - entropy.0, - &key_1, - 3, - 0, - &signer_1, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_1 = - documents_batch_create_transition_1 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_2 = - BatchTransition::new_document_creation_transition_from_document( - document_2, - domain, - entropy.0, - &key_2, - 3, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_2 = - documents_batch_create_transition_2 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let documents_batch_create_transition_3 = - BatchTransition::new_document_creation_transition_from_document( - document_3.clone(), - domain, - entropy.0, - &key_3, - 3, - 0, - &signer_3, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition_3 = - documents_batch_create_transition_3 - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_preorder_transition_1.clone(), - documents_batch_create_serialized_preorder_transition_2.clone(), - 
documents_batch_create_serialized_preorder_transition_3.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 3); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![ - documents_batch_create_serialized_transition_1.clone(), - documents_batch_create_serialized_transition_2.clone(), - documents_batch_create_serialized_transition_3.clone(), - ], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - assert_eq!(processing_result.valid_count(), 3); - - let mut order_by = IndexMap::new(); - - order_by.insert( - "records.identity".to_string(), - OrderClause { - field: "records.identity".to_string(), - ascending: true, - }, - ); - - let drive_query = DriveDocumentQuery { - contract: &dpns_contract, - document_type: domain, - internal_clauses: InternalClauses { - primary_key_in_clause: None, - primary_key_equal_clause: None, - in_clause: None, - range_clause: Some(WhereClause { - field: "records.identity".to_string(), - operator: WhereOperator::LessThanOrEquals, - value: Value::Bytes32([255; 32]), - }), - equal_clauses: Default::default(), - }, - offset: None, - limit: None, - order_by, - start_at: None, - start_at_included: false, - block_time_ms: None, - }; - - let documents = platform - .drive - .query_documents(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - // here we will get all 3 documents - 
assert_eq!(documents.len(), 3); - - let drive_query = DriveDocumentQuery { - contract: &dpns_contract, - document_type: domain, - internal_clauses: InternalClauses { - primary_key_in_clause: None, - primary_key_equal_clause: None, - in_clause: None, - range_clause: None, - equal_clauses: BTreeMap::from([( - "records.identity".to_string(), - WhereClause { - field: "records.identity".to_string(), - operator: WhereOperator::Equal, - value: Value::Null, - }, - )]), - }, - offset: None, - limit: None, - order_by: Default::default(), - start_at: None, - start_at_included: false, - block_time_ms: None, - }; - - let documents = platform - .drive - .query_documents(drive_query, None, false, None, None) - .expect("expected to get back documents") - .documents_owned(); - - assert_eq!(documents.len(), 2); - } - } - - mod token_tests { - use super::*; - use crate::execution::validation::state_transition::tests::create_token_contract_with_owner_identity; - use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; - use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Setters; - use dpp::data_contract::associated_token::token_configuration::TokenConfiguration; - use dpp::data_contract::associated_token::token_configuration_convention::v0::TokenConfigurationConventionV0; - use dpp::data_contract::associated_token::token_configuration_convention::v0::TokenConfigurationLocalizationsV0; - use dpp::data_contract::associated_token::token_distribution_rules::accessors::v0::TokenDistributionRulesV0Setters; - use dpp::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; - use dpp::data_contract::change_control_rules::v0::ChangeControlRulesV0; - use dpp::data_contract::change_control_rules::ChangeControlRules; - use dpp::group::GroupStateTransitionInfoStatus; - use dpp::state_transition::batch_transition::methods::v1::DocumentsBatchTransitionMethodsV1; - use 
dpp::state_transition::batch_transition::TokenConfigUpdateTransition; - mod token_mint_tests { - use super::*; - - mod token_mint_tests_normal_scenarios { - use super::*; - - #[test] - fn test_token_mint_by_owner_allowed_sending_to_self() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - 
token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(101337)); - } - - #[test] - fn test_token_mint_by_owner_can_not_mint_past_max_supply() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply(Some(1000000)); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 2000000, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::TokenMintPastMaxSupplyError(_)), - _ - )] - ); - - platform - .drive - 
.grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - } - - #[test] - fn test_token_mint_by_owner_allowed_sending_to_other() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (receiver, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(receiver.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - 
[StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - receiver.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(1337)); - } - - #[test] - fn test_token_mint_sending_to_non_existing_identity_causes_error() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let receiver = Identifier::random_with_rng(&mut rng); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(receiver), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - 
processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::RecipientIdentityDoesNotExistError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - receiver.to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owner_no_destination_causes_error() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - None, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to 
process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::BasicError( - BasicError::DestinationIdentityForTokenMintingNotSetError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - } - - mod token_mint_tests_no_recipient_minting { - use super::*; - - #[test] - fn test_token_mint_by_owned_id_allowed_sending_to_self() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration - .distribution_rules_mut() - .set_minting_allow_choosing_destination(false); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - 
platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::BasicError( - BasicError::ChoosingTokenMintRecipientNotAllowedError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - } - - #[test] - fn test_token_mint_by_owned_id_allowed_sending_to_other() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (receiver, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration - .distribution_rules_mut() - .set_minting_allow_choosing_destination(false); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(receiver.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - 
.expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::BasicError( - BasicError::ChoosingTokenMintRecipientNotAllowedError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - receiver.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owned_id_no_destination_causes_error() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration - .distribution_rules_mut() - .set_minting_allow_choosing_destination(false); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - None, - None, - None, - &key, - 2, - 0, - &signer, - 
platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::BasicError( - BasicError::DestinationIdentityForTokenMintingNotSetError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - } - - mod token_mint_tests_contract_has_recipient { - use super::*; - - #[test] - fn test_token_mint_by_owned_id_allowed_sending_to_self() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration - .distribution_rules_mut() - .set_minting_allow_choosing_destination(false); - token_configuration - .distribution_rules_mut() - .set_new_tokens_destination_identity(Some(identity.id())); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - 
BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::BasicError( - BasicError::ChoosingTokenMintRecipientNotAllowedError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - } - - #[test] - fn test_token_mint_by_owned_id_allowed_sending_to_other() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (receiver, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, 
token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration - .distribution_rules_mut() - .set_minting_allow_choosing_destination(false); - token_configuration - .distribution_rules_mut() - .set_new_tokens_destination_identity(Some(identity.id())); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(receiver.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::BasicError( - BasicError::ChoosingTokenMintRecipientNotAllowedError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - receiver.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owned_id_no_set_destination_should_use_contracts() { - let platform_version = 
PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration - .distribution_rules_mut() - .set_minting_allow_choosing_destination(false); - token_configuration - .distribution_rules_mut() - .set_new_tokens_destination_identity(Some(identity.id())); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - None, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - 
identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(101337)); - } - } - - mod token_mint_tests_authorization_scenarios { - use super::*; - use dpp::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; - use dpp::data_contract::change_control_rules::v0::ChangeControlRulesV0; - use dpp::data_contract::change_control_rules::ChangeControlRules; - use dpp::data_contract::group::v0::GroupV0; - use dpp::data_contract::group::Group; - use dpp::group::{GroupStateTransitionInfo, GroupStateTransitionInfoStatus}; - use dpp::state_transition::batch_transition::TokenMintTransition; - - #[test] - fn test_token_mint_by_owner_sending_to_self_minting_not_allowed() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::NoOne, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - None, - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - 
None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - } - - #[test] - fn test_token_mint_by_owner_sending_to_self_minting_only_allowed_by_group() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - 
token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 5), (identity_2.id(), 5)].into(), - required_power: 10, - }), - )] - .into(), - ), - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - 
assert_eq!(token_balance, Some(100000)); - } - - #[test] - fn test_token_mint_by_owner_sending_to_self_minting_only_allowed_by_group_enough_member_power( - ) { - // We are using a group, but our member alone has enough power in the group to do the action - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 5), (identity_2.id(), 1)].into(), - required_power: 5, - }), - )] - .into(), - ), - platform_version, - ); - - let documents_batch_create_transition = - BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = 
platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(101337)); - } - - #[test] - fn test_token_mint_by_owner_requires_group_other_member() { - // We are using a group, and two members need to sign for the event to happen - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer2, key2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - 
self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - // Now we need to get the second identity to also sign it - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 1337, - ); - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - 1337, - 
Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key2, - 2, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(101337)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owner_requires_group_resubmitting_causes_error() { - // We are using a group, and two members need to sign for the event to happen - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); 
- - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - 
[StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - // Now we need to get the second identity to also sign it, but we are going to resubmit with first - // This will create an error - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 1337, - ); - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::GroupActionAlreadySignedByIdentityError(_) - ), - _ - )] - ); - - platform - .drive 
- .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owner_requires_group_other_member_resubmitting_causes_error() - { - // We are using a group, and two members need to sign for the event to happen - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer2, key2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_3, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [ - (identity.id(), 1), 
- (identity_2.id(), 1), - (identity_3.id(), 1), - ] - .into(), - required_power: 3, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - // Now we need to get the second identity to also sign it - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 1337, - ); - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - 
GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key2, - 2, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - - // Now we need to get the second identity to sign it again to cause the error - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key2, - 
3, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::GroupActionAlreadySignedByIdentityError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owner_requires_group_other_member_submitting_after_completion_causes_error( - ) { - // We are using a group, and two members need to sign for the event to happen - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let 
(identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer2, key2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_3, signer3, key3) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [ - (identity.id(), 1), - (identity_2.id(), 1), - (identity_3.id(), 1), - ] - .into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - 
processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - // Now we need to get the second identity to also sign it - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 1337, - ); - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key2, - 2, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let 
token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(101337)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - - // Now we need to get the second identity to sign it again to cause the error - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_3.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key3, - 2, - 0, - &signer3, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::GroupActionAlreadyCompletedError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - 
.fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(101337)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_by_owner_requires_group_proposer_not_in_group() { - // We are using a group, and two members need to sign for the event to happen - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_3, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity_3.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = 
BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::IdentityNotMemberOfGroupError( - _ - )), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - } - - #[test] - fn test_token_mint_by_owner_requires_group_other_signer_not_part_of_group() { - // We are using a group, and two members need to sign for the event to happen - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), 
dash_to_credits!(0.5)); - - let (identity_2, signer2, key2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_3, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_3.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - 
platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - // Now we need to get the second identity to also sign it - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 1337, - ); - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key2, - 2, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::IdentityNotMemberOfGroupError( - _ - )), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - 
.fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_mint_other_signer_going_first_causes_error() { - // We are using a group, and the second member gets a bit hasty and signs first - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer2, key2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - // The second identity to also sign it - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 
1337, - ); - let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key2, - 2, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let confirm_token_mint_serialized_transition = confirm_token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![confirm_token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::GroupActionDoesNotExistError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - } - } - - mod token_burn_tests { - use super::*; - - #[test] - fn test_token_burn() { - let platform_version = 
PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = BatchTransition::new_token_burn_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000 - 1337; - assert_eq!(token_balance, Some(expected_amount)); - } - - #[test] - fn test_token_burn_trying_to_burn_more_than_we_have() { - let 
platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = BatchTransition::new_token_burn_transition( - token_id, - identity.id(), - contract.id(), - 0, - 200000, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::IdentityDoesNotHaveEnoughTokenBalanceError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); // nothing was burned 
- } - - #[test] - fn test_token_burn_gives_error_if_trying_to_burn_from_not_allowed_identity() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (contract_owner_identity, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - contract_owner_identity.id(), - None::, - None, - platform_version, - ); - - let documents_batch_create_transition = BatchTransition::new_token_burn_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let documents_batch_create_serialized_transition = - documents_batch_create_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![documents_batch_create_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - 
.fetch_identity_token_balance( - token_id.to_buffer(), - contract_owner_identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - } - - mod token_transfer_tests { - use dpp::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; - use dpp::data_contract::change_control_rules::ChangeControlRules; - use dpp::data_contract::change_control_rules::v0::ChangeControlRulesV0; - use dpp::data_contract::group::Group; - use dpp::state_transition::batch_transition::TokenMintTransition; - use dpp::data_contract::group::v0::GroupV0; - use dpp::group::{GroupStateTransitionInfo, GroupStateTransitionInfoStatus}; - use dpp::identity::SecurityLevel; - use dpp::state_transition::batch_transition::accessors::DocumentsBatchTransitionAccessorsV0; - use dpp::state_transition::batch_transition::batched_transition::token_transition::TokenTransition; - use dpp::state_transition::StateTransition; - use dpp::state_transition::batch_transition::batched_transition::BatchedTransitionMutRef; - use dpp::state_transition::batch_transition::token_base_transition::token_base_transition_accessors::TokenBaseTransitionAccessors; - use dpp::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; - use super::*; - - #[test] - fn test_token_transfer() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, 
rng.gen(), dash_to_credits!(0.5)); - - let (recipient, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let token_transfer_transition = BatchTransition::new_token_transfer_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - recipient.id(), - None, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_transfer_serialized_transition = token_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000 - 1337; - assert_eq!(token_balance, Some(expected_amount)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 1337; - assert_eq!(token_balance, Some(expected_amount)); - } - - #[test] - fn 
test_token_transfer_to_ourself_should_fail() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let token_transfer_transition = BatchTransition::new_token_transfer_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - identity.id(), - None, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_transfer_serialized_transition = token_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::UnpaidConsensusError( - ConsensusError::BasicError(BasicError::TokenTransferToOurselfError(_)) - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, Some(100000)); 
- } - - #[test] - fn test_token_transfer_trying_to_send_more_than_we_have() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (recipient, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - None::, - None, - platform_version, - ); - - let token_transfer_transition = BatchTransition::new_token_transfer_transition( - token_id, - identity.id(), - contract.id(), - 0, - 200000, - recipient.id(), - None, - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_transfer_serialized_transition = token_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::IdentityDoesNotHaveEnoughTokenBalanceError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - 
token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000; - assert_eq!(token_balance, Some(expected_amount)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - - #[test] - fn test_token_transfer_adding_group_info_causes_error() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (recipient, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - // let's start by creating a real action - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_manual_minting_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (recipient.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let token_mint_transition = BatchTransition::new_token_mint_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - Some(identity.id()), - None, - 
Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_mint_serialized_transition = token_mint_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_mint_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let action_id = TokenMintTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 1337, - ); - - let mut token_transfer_transition = BatchTransition::new_token_transfer_transition( - token_id, - identity.id(), - contract.id(), - 0, - 200000, - recipient.id(), - None, - None, - None, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - // here we add fake info - match &mut token_transfer_transition { - StateTransition::Batch(batch) => { - let first_transition = batch - .first_transition_mut() - .expect("expected_first_transition"); - match first_transition { - BatchedTransitionMutRef::Token(token) => match token { - TokenTransition::Transfer(transfer) => transfer - .base_mut() - .set_using_group_info(Some(GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: true, - })), - _ => {} - }, - _ => {} - } - } - _ => {} - } - - 
token_transfer_transition - .sign_external(&key, &signer, Some(|_, _| Ok(SecurityLevel::HIGH))) - .expect("expected to resign transaction"); - - let token_transfer_serialized_transition = token_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::UnpaidConsensusError( - ConsensusError::BasicError( - BasicError::GroupActionNotAllowedOnTransitionError(_) - ) - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000; - assert_eq!(token_balance, Some(expected_amount)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - assert_eq!(token_balance, None); - } - } - - mod token_freeze_tests { - use super::*; - use dpp::tokens::info::v0::IdentityTokenInfoV0Accessors; - - #[test] - fn test_token_freeze() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, 
key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_freeze_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - None, - platform_version, - ); - - let freeze_transition = BatchTransition::new_token_freeze_transition( - token_id, - identity.id(), - contract.id(), - 0, - identity_2.id(), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let freeze_serialized_transition = freeze_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![freeze_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_frozen = platform - .drive - .fetch_identity_token_info( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to 
fetch token info") - .map(|info| info.frozen()); - assert_eq!(token_frozen, Some(true)); - } - - #[test] - fn test_token_freeze_and_unfreeze() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_freeze_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - token_configuration.set_unfreeze_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - None, - platform_version, - ); - - let freeze_transition = BatchTransition::new_token_freeze_transition( - token_id, - identity.id(), - contract.id(), - 0, - identity_2.id(), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let freeze_serialized_transition = freeze_transition - .serialize_to_bytes() - 
.expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![freeze_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_frozen = platform - .drive - .fetch_identity_token_info( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token info") - .map(|info| info.frozen()); - assert_eq!(token_frozen, Some(true)); - - let unfreeze_transition = BatchTransition::new_token_unfreeze_transition( - token_id, - identity.id(), - contract.id(), - 0, - identity_2.id(), - None, - None, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let unfreeze_serialized_transition = unfreeze_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![unfreeze_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit 
transaction"); - - let token_frozen = platform - .drive - .fetch_identity_token_info( - token_id.to_buffer(), - identity_2.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token info") - .map(|info| info.frozen()); - assert_eq!(token_frozen, Some(false)); - } - - #[test] - fn test_token_frozen_receive_balance_allowed_sending_not_allowed_till_unfrozen() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (recipient, signer2, key2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_freeze_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - token_configuration.set_unfreeze_rules(ChangeControlRules::V0( - ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }, - )); - }), - None, - platform_version, - ); - - let freeze_transition = BatchTransition::new_token_freeze_transition( - token_id, - identity.id(), - contract.id(), - 0, - 
recipient.id(), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let freeze_serialized_transition = freeze_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![freeze_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_frozen = platform - .drive - .fetch_identity_token_info( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token info") - .map(|info| info.frozen()); - assert_eq!(token_frozen, Some(true)); - - let token_transfer_transition = BatchTransition::new_token_transfer_transition( - token_id, - identity.id(), - contract.id(), - 0, - 1337, - recipient.id(), - None, - None, - None, - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_transfer_serialized_transition = token_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected 
to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000 - 1337; - assert_eq!(token_balance, Some(expected_amount)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 1337; - assert_eq!(token_balance, Some(expected_amount)); - - //now let's try sending our balance - - let token_transfer_back_transition = - BatchTransition::new_token_transfer_transition( - token_id, - recipient.id(), - contract.id(), - 0, - 300, - identity.id(), - None, - None, - None, - &key2, - 2, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_transfer_back_serialized_transition = token_transfer_back_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_back_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::IdentityTokenAccountFrozenError(_)), - _ - )] - 
); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // We expect no change - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000 - 1337; - assert_eq!(token_balance, Some(expected_amount)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 1337; - assert_eq!(token_balance, Some(expected_amount)); - - let unfreeze_transition = BatchTransition::new_token_unfreeze_transition( - token_id, - identity.id(), - contract.id(), - 0, - recipient.id(), - None, - None, - &key, - 4, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let unfreeze_serialized_transition = unfreeze_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![unfreeze_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_frozen = platform - .drive - .fetch_identity_token_info( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token info") 
- .map(|info| info.frozen()); - assert_eq!(token_frozen, Some(false)); - - let token_transfer_transition = BatchTransition::new_token_transfer_transition( - token_id, - recipient.id(), - contract.id(), - 0, - 300, - identity.id(), - None, - None, - None, - &key2, - 3, - 0, - &signer2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let token_transfer_serialized_transition = token_transfer_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![token_transfer_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - identity.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 100000 - 1337 + 300; - assert_eq!(token_balance, Some(expected_amount)); - - let token_balance = platform - .drive - .fetch_identity_token_balance( - token_id.to_buffer(), - recipient.id().to_buffer(), - None, - platform_version, - ) - .expect("expected to fetch token balance"); - let expected_amount = 1337 - 300; - assert_eq!(token_balance, Some(expected_amount)); - } - } - - mod token_config_update_tests { - use super::*; - use dpp::data_contract::accessors::v1::DataContractV1Getters; - use 
dpp::data_contract::associated_token::token_configuration_convention::TokenConfigurationConvention; - use dpp::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; - - mod non_group { - use super::*; - #[test] - fn test_token_config_update_by_owner_changing_total_max_supply() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: - AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - None, - platform_version, - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupply(Some(1000000)), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - 
&vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!(updated_token_config.max_supply(), Some(1000000)); - } - - #[test] - fn test_token_config_update_by_owner_changing_total_max_supply_to_less_than_current_supply( - ) { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: - AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - None, - platform_version, - ); - - 
let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupply(Some(1000)), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::TokenSettingMaxSupplyToLessThanCurrentSupplyError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!(updated_token_config.max_supply(), None); - } - - #[test] - fn test_token_config_update_by_owner_change_admin_to_another_identity() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = 
platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: - AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::ContractOwner, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - None, - platform_version, - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupplyControlGroup( - AuthorizedActionTakers::Identity(identity_2.id()), - ), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - 
.unwrap() - .expect("expected to commit transaction"); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupply(Some(1000000)), - None, - None, - &key_2, - 2, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!(updated_token_config.max_supply(), Some(1000000)); - } - - #[test] - fn test_token_config_update_by_owner_change_admin_to_a_non_existent_identity_error() - { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = 
platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let identity_2_id = Identifier::random_with_rng(&mut rng); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: - AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::ContractOwner, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - None, - platform_version, - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupplyControlGroup( - AuthorizedActionTakers::Identity(identity_2_id), - ), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::NewAuthorizedActionTakerIdentityDoesNotExistError(_) - ), - _ - )] - ); - - platform 
- .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - - #[test] - fn test_token_config_update_by_owner_change_admin_to_a_non_existent_group_error() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: - AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::ContractOwner, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - None, - platform_version, - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupplyControlGroup( - AuthorizedActionTakers::Group(0), - ), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, 
- &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::NewAuthorizedActionTakerGroupDoesNotExistError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - - #[test] - fn test_token_config_update_by_owner_change_admin_to_main_group_not_set_error() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: - AuthorizedActionTakers::ContractOwner, - admin_action_takers: AuthorizedActionTakers::ContractOwner, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - None, - platform_version, - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupplyControlGroup( - AuthorizedActionTakers::MainGroup, - ), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let 
config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::NewAuthorizedActionTakerMainGroupNotSetError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - } - - mod with_group { - use super::*; - - #[test] - fn test_token_config_update_by_group_member_changing_total_max_supply_not_using_group_gives_error( - ) { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, _, _) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - 
changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupply(Some(1000000)), - None, - None, - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!(updated_token_config.max_supply(), None); - } - - #[test] - fn 
test_token_config_update_by_group_member_changing_total_max_supply() { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut TokenConfiguration| { - token_configuration.set_max_supply_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::NoOne, - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: false, - }), - ); - }), - Some( - [( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - )] - .into(), - ), - platform_version, - ); - - let action_id = TokenConfigUpdateTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - TokenConfigurationChangeItem::MaxSupply(Some(1000000)).u8_item_index(), - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupply(Some(1000000)), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0), - ), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition 
= config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let new_contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = new_contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!(updated_token_config.max_supply(), None); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::MaxSupply(Some(1000000)), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id, - action_is_proposer: false, - }, - ), - ), - &key_2, - 2, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - 
.process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let new_contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = new_contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!(updated_token_config.max_supply(), Some(1000000)); - } - - #[test] - fn test_token_config_change_own_admin_group_give_control_power_and_change_admin_back( - ) { - let platform_version = PlatformVersion::latest(); - let mut platform = TestPlatformBuilder::new() - .with_latest_protocol_version() - .build_with_mock_rpc() - .set_genesis_state(); - - let mut rng = StdRng::seed_from_u64(49853); - - let platform_state = platform.state.load(); - - let (identity, signer, key) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_2, signer_2, key_2) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_3, signer_3, key_3) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_4, signer_4, key_4) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (identity_5, signer_5, key_5) = - setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); - - let (contract, token_id) = create_token_contract_with_owner_identity( - &mut platform, - identity.id(), - Some(|token_configuration: &mut 
TokenConfiguration| { - token_configuration.set_conventions_change_rules( - ChangeControlRules::V0(ChangeControlRulesV0 { - authorized_to_make_change: AuthorizedActionTakers::Group(0), - admin_action_takers: AuthorizedActionTakers::Group(1), - changing_authorized_action_takers_to_no_one_allowed: false, - changing_admin_action_takers_to_no_one_allowed: false, - self_changing_admin_action_takers_allowed: true, - }), - ); - }), - Some( - [ - ( - 0, - Group::V0(GroupV0 { - members: [(identity.id(), 1), (identity_2.id(), 1)].into(), - required_power: 2, - }), - ), - ( - 1, - Group::V0(GroupV0 { - members: [ - (identity_3.id(), 1), - (identity_4.id(), 1), - (identity_5.id(), 1), - ] - .into(), - required_power: 2, - }), - ), - ] - .into(), - ), - platform_version, - ); - - let action_id = TokenConfigUpdateTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity_3.id().as_bytes(), - 2, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(0), - ) - .u8_item_index(), - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_3.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(0), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(1), - ), - &key_3, - 2, - 0, - &signer_3, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - 
.expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let new_contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = new_contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!( - updated_token_config - .conventions_change_rules() - .admin_action_takers(), - &AuthorizedActionTakers::Group(1) - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_4.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(0), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 1, - action_id, - action_is_proposer: false, - }, - ), - ), - &key_4, - 2, - 0, - &signer_4, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - 
[StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let new_contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = new_contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!( - updated_token_config - .conventions_change_rules() - .admin_action_takers(), - &AuthorizedActionTakers::Group(0) - ); - assert_eq!(new_contract.contract.version(), 2); - - // 5 is late to the game, admin control has already been transferred, he should get an error - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_5.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(0), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 1, - action_id, - action_is_proposer: false, - }, - ), - ), - &key_5, - 2, - 0, - &signer_5, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - 
processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError( - StateError::GroupActionAlreadyCompletedError(_) - ), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // Let's try if he proposes it now - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_5.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(0), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(1), - ), - &key_5, - 3, - 0, - &signer_5, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // Now let's have Group 0 change the control of the conventions to identity 2 only - - let action_id_change_control = - TokenConfigUpdateTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 2, - 
TokenConfigurationChangeItem::ConventionsControlGroup( - AuthorizedActionTakers::Identity(identity_2.id()), - ) - .u8_item_index(), - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsControlGroup( - AuthorizedActionTakers::Identity(identity_2.id()), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0), - ), - &key, - 2, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsControlGroup( - AuthorizedActionTakers::Identity(identity_2.id()), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id: action_id_change_control, - action_is_proposer: false, - }, - ), - ), - &key_2, - 2, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - 
.expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let new_contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = new_contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!( - updated_token_config - .conventions_change_rules() - .authorized_to_make_change_action_takers(), - &AuthorizedActionTakers::Identity(identity_2.id()) - ); - assert_eq!(new_contract.contract.version(), 3); - - // Now let's have Group 0 hand it back to Group 1 - - let action_id_return = - TokenConfigUpdateTransition::calculate_action_id_with_fields( - token_id.as_bytes(), - identity.id().as_bytes(), - 3, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(1), - ) - .u8_item_index(), - ); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(1), - ), - 
None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0), - ), - &key, - 3, - 0, - &signer, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::ConventionsAdminGroup( - AuthorizedActionTakers::Group(1), - ), - None, - Some( - GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( - GroupStateTransitionInfo { - group_contract_position: 0, - action_id: action_id_return, - action_is_proposer: false, - }, - ), - ), - &key_2, - 3, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - 
&vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - let new_contract = platform - .drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, - platform_version, - ) - .unwrap() - .expect("expected to fetch token balance") - .expect("expected contract"); - let updated_token_config = new_contract - .contract - .expected_token_configuration(0) - .expect("expected token configuration"); - assert_eq!( - updated_token_config - .conventions_change_rules() - .admin_action_takers(), - &AuthorizedActionTakers::Group(1) - ); - assert_eq!(new_contract.contract.version(), 4); - - // Not let's try identity 3 to change the conventions (should fail) - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_3.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::Conventions( - TokenConfigurationConvention::V0(TokenConfigurationConventionV0 { - localizations: [( - "en".to_string(), - TokenConfigurationLocalizationsV0 { - should_capitalize: true, - singular_form: "garzon".to_string(), - plural_form: "garzons".to_string(), - }, - )] - .into(), - decimals: 8, - }), - ), - None, - None, - &key_3, - 3, - 0, - &signer_3, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = 
platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::PaidConsensusError( - ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), - _ - )] - ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - - // Not let's try identity 2 to change the conventions (should succeed) - - let config_update_transition = - BatchTransition::new_token_config_update_transition( - token_id, - identity_2.id(), - contract.id(), - 0, - TokenConfigurationChangeItem::Conventions( - TokenConfigurationConvention::V0(TokenConfigurationConventionV0 { - localizations: [( - "en".to_string(), - TokenConfigurationLocalizationsV0 { - should_capitalize: true, - singular_form: "garzon".to_string(), - plural_form: "garzons".to_string(), - }, - )] - .into(), - decimals: 8, - }), - ), - None, - None, - &key_2, - 4, - 0, - &signer_2, - platform_version, - None, - None, - None, - ) - .expect("expect to create documents batch transition"); - - let config_update_transition_serialized_transition = config_update_transition - .serialize_to_bytes() - .expect("expected documents batch serialized state transition"); - - let transaction = platform.drive.grove.start_transaction(); - - let processing_result = platform - .platform - .process_raw_state_transitions( - &vec![config_update_transition_serialized_transition.clone()], - &platform_state, - &BlockInfo::default(), - &transaction, - platform_version, - false, - None, - ) - .expect("expected to process state transition"); - - assert_matches!( - processing_result.execution_results().as_slice(), - [StateTransitionExecutionResult::SuccessfulExecution(_, _)] 
- ); - - platform - .drive - .grove - .commit_transaction(transaction) - .unwrap() - .expect("expected to commit transaction"); - } - } - } - } -} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/state/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/state/v0/mod.rs index 6db47d9f743..bf28e8c9e16 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/state/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/state/v0/mod.rs @@ -22,6 +22,7 @@ use crate::execution::validation::state_transition::batch::action_validation::do use crate::execution::validation::state_transition::batch::action_validation::document::document_transfer_transition_action::DocumentTransferTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::document::document_update_price_transition_action::DocumentUpdatePriceTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::token::token_burn_transition_action::TokenBurnTransitionActionValidation; +use crate::execution::validation::state_transition::batch::action_validation::token::token_claim_transition_action::TokenClaimTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::token::token_config_update_transition_action::TokenConfigUpdateTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::token::token_destroy_frozen_funds_transition_action::TokenDestroyFrozenFundsTransitionActionValidation; use crate::execution::validation::state_transition::batch::action_validation::token::token_emergency_action_transition_action::TokenEmergencyActionTransitionActionValidation; @@ -219,6 +220,15 @@ impl DocumentsBatchStateTransitionStateValidationV0 for 
BatchTransition { platform_version, )? } + TokenTransitionAction::ClaimAction(claim_action) => claim_action + .validate_state( + platform, + owner_id, + block_info, + execution_context, + transaction, + platform_version, + )?, }, BatchedTransitionAction::BumpIdentityDataContractNonce(_) => { return Err(Error::Execution(ExecutionError::CorruptedCodeExecution( diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/creation.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/creation.rs new file mode 100644 index 00000000000..f9b24000b64 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/creation.rs @@ -0,0 +1,2442 @@ +use super::*; + +mod creation_tests { + use super::*; + use dapi_grpc::platform::v0::{get_contested_resource_vote_state_request, get_contested_resource_vote_state_response, GetContestedResourceVoteStateRequest, GetContestedResourceVoteStateResponse}; + use dapi_grpc::platform::v0::get_contested_resource_vote_state_request::get_contested_resource_vote_state_request_v0::ResultType; + use dapi_grpc::platform::v0::get_contested_resource_vote_state_request::GetContestedResourceVoteStateRequestV0; + use dapi_grpc::platform::v0::get_contested_resource_vote_state_response::{get_contested_resource_vote_state_response_v0, GetContestedResourceVoteStateResponseV0}; + use assert_matches::assert_matches; + use rand::distributions::Standard; + use dpp::consensus::basic::document::DocumentFieldMaxSizeExceededError; + use dpp::consensus::ConsensusError; + use dpp::consensus::basic::BasicError; + use dpp::fee::fee_result::refunds::FeeRefunds; + use dpp::fee::fee_result::FeeResult; + use dpp::data_contract::accessors::v0::DataContractV0Setters; + use dpp::data_contract::document_type::restricted_creation::CreationRestrictionMode; + use dpp::document::Document; + use 
dpp::document::serialization_traits::DocumentPlatformConversionMethodsV0; + use dpp::util::hash::hash_double; + use dpp::voting::vote_choices::resource_vote_choice::ResourceVoteChoice; + use dpp::voting::vote_choices::resource_vote_choice::ResourceVoteChoice::TowardsIdentity; + use drive::util::object_size_info::DataContractResolvedInfo; + use drive::drive::votes::resolved::vote_polls::contested_document_resource_vote_poll::ContestedDocumentResourceVotePollWithContractInfoAllowBorrowed; + use drive::query::vote_poll_vote_state_query::ContestedDocumentVotePollDriveQueryResultType::DocumentsAndVoteTally; + use drive::query::vote_poll_vote_state_query::ResolvedContestedDocumentVotePollDriveQuery; + use drive::util::test_helpers::setup_contract; + use crate::execution::validation::state_transition::state_transitions::tests::{add_contender_to_dpns_name_contest, create_dpns_identity_name_contest, create_dpns_name_contest_give_key_info, perform_votes_multi}; + use crate::platform_types::platform_state::v0::PlatformStateV0Methods; + use crate::platform_types::state_transitions_processing_result::StateTransitionExecutionResult::PaidConsensusError; + use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; + use dpp::consensus::state::state_error::StateError; + use dpp::dashcore::Network; + use dpp::dashcore::Network::Testnet; + use dpp::data_contract::DataContract; + use dpp::identity::SecurityLevel; + use dpp::state_transition::batch_transition::document_base_transition::DocumentBaseTransition; + use dpp::state_transition::batch_transition::document_create_transition::DocumentCreateTransitionV0; + use dpp::state_transition::batch_transition::{DocumentCreateTransition, BatchTransitionV0}; + use dpp::state_transition::StateTransition; + use crate::config::PlatformConfig; + + #[test] + fn test_document_creation() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + 
.build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + 
+ #[test] + fn test_document_creation_should_fail_if_reusing_entropy() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + 
processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + // Now let's create a second document with the same entropy + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/coy.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::DocumentAlreadyPresentError { .. 
}), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + + #[test] + fn test_document_creation_with_very_big_field() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay_contract_no_max_length = setup_contract( + &platform.drive, + "tests/supporting_files/contract/dashpay/dashpay-contract-no-max-length.json", + None, + None, + None::, + None, + None, + ); + + let dashpay_contract = dashpay_contract_no_max_length.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let max_field_size = platform_version.system_limits.max_field_value_size; + let avatar_size = max_field_size + 1000; + + document.set( + "avatar", + Value::Bytes( + rng.sample_iter(Standard) + .take(avatar_size as usize) + .collect(), + ), + ); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + 
.expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + assert_eq!( + processing_result.execution_results().first().unwrap(), + &PaidConsensusError( + ConsensusError::BasicError(BasicError::DocumentFieldMaxSizeExceededError( + DocumentFieldMaxSizeExceededError::new( + "avatar".to_string(), + avatar_size as u64, + max_field_size as u64 + ) + )), + FeeResult { + storage_fee: 11556000, + processing_fee: 526140, + fee_refunds: FeeRefunds::default(), + removed_bytes_from_system: 0 + } + ) + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + + #[test] + fn test_document_creation_on_contested_unique_index() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity_1, signer_1, key_1) = + setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) = + setup_identity(&mut platform, 93, dash_to_credits!(0.5)); + + let dpns = platform.drive.cache.system_data_contracts.load_dpns(); + let dpns_contract = dpns.clone(); + + let preorder = dpns_contract + .document_type_for_name("preorder") + .expect("expected a profile document type"); + + assert!(!preorder.documents_mutable()); + assert!(preorder.documents_can_be_deleted()); + assert!(!preorder.documents_transferable().is_transferable()); + + let domain = dpns_contract + .document_type_for_name("domain") + 
.expect("expected a profile document type"); + + assert!(!domain.documents_mutable()); + // Deletion is disabled with data trigger + assert!(domain.documents_can_be_deleted()); + assert!(domain.documents_transferable().is_transferable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut preorder_document_2 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_2 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document_1.set("parentDomainName", "dash".into()); + document_1.set("normalizedParentDomainName", "dash".into()); + document_1.set("label", "quantum".into()); + document_1.set("normalizedLabel", "quantum".into()); + document_1.set("records.identity", document_1.owner_id().into()); + document_1.set("subdomainRules.allowSubdomains", false.into()); + + document_2.set("parentDomainName", "dash".into()); + document_2.set("normalizedParentDomainName", "dash".into()); + document_2.set("label", "quantum".into()); + document_2.set("normalizedLabel", "quantum".into()); + 
document_2.set("records.identity", document_2.owner_id().into()); + document_2.set("subdomainRules.allowSubdomains", false.into()); + + let salt_1: [u8; 32] = rng.gen(); + let salt_2: [u8; 32] = rng.gen(); + + let mut salted_domain_buffer_1: Vec = vec![]; + salted_domain_buffer_1.extend(salt_1); + salted_domain_buffer_1.extend("quantum.dash".as_bytes()); + + let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); + + let mut salted_domain_buffer_2: Vec = vec![]; + salted_domain_buffer_2.extend(salt_2); + salted_domain_buffer_2.extend("quantum.dash".as_bytes()); + + let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); + + preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); + preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); + + document_1.set("preorderSalt", salt_1.into()); + document_2.set("preorderSalt", salt_2.into()); + + let documents_batch_create_preorder_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_1, + preorder, + entropy.0, + &key_1, + 2, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_1 = + documents_batch_create_preorder_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_preorder_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_2, + preorder, + entropy.0, + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_2 = + documents_batch_create_preorder_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_1 = + 
BatchTransition::new_document_creation_transition_from_document( + document_1, + domain, + entropy.0, + &key_1, + 3, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + document_2, + domain, + entropy.0, + &key_2, + 3, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_2 = documents_batch_create_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_preorder_transition_1.clone(), + documents_batch_create_serialized_preorder_transition_2.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 2); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_transition_1.clone(), + documents_batch_create_serialized_transition_2.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + 
.commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 2); + + // Now let's run a query for the vote totals + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) + .expect("expected to encode the word dash"); + + let quantum_encoded = bincode::encode_to_vec(Value::Text("quantum".to_string()), config) + .expect("expected to encode the word quantum"); + + let index_name = "parentNameAndLabel".to_string(); + + let query_validation_result = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: domain.name().clone(), + index_name: index_name.clone(), + index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: false, + start_at_identifier_info: None, + count: None, + prove: false, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = query_validation_result.version.expect("expected a version"); + + let Some( + get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( + get_contested_resource_vote_state_response_v0::ContestedResourceContenders { + contenders, + abstain_vote_tally, + lock_vote_tally, + finished_vote_info, + }, + ), + ) = result + else { + panic!("expected contenders") + }; + + assert_eq!(abstain_vote_tally, None); + + assert_eq!(lock_vote_tally, None); + + 
assert_eq!(finished_vote_info, None); + + assert_eq!(contenders.len(), 2); + + let first_contender = contenders.first().unwrap(); + + let second_contender = contenders.last().unwrap(); + + let first_contender_document = Document::from_bytes( + first_contender + .document + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + let second_contender_document = Document::from_bytes( + second_contender + .document + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + assert_ne!(first_contender_document, second_contender_document); + + assert_eq!(first_contender.identifier, identity_1.id().to_vec()); + + assert_eq!(second_contender.identifier, identity_2.id().to_vec()); + + assert_eq!(first_contender.vote_count, Some(0)); + + assert_eq!(second_contender.vote_count, Some(0)); + + let GetContestedResourceVoteStateResponse { version } = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: domain.name().clone(), + index_name: "parentNameAndLabel".to_string(), + index_values: vec![dash_encoded, quantum_encoded], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: true, + start_at_identifier_info: None, + count: None, + prove: true, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = version.expect("expected a version"); + + let Some(get_contested_resource_vote_state_response_v0::Result::Proof(proof)) = 
result + else { + panic!("expected contenders") + }; + + let resolved_contested_document_vote_poll_drive_query = + ResolvedContestedDocumentVotePollDriveQuery { + vote_poll: ContestedDocumentResourceVotePollWithContractInfoAllowBorrowed { + contract: DataContractResolvedInfo::BorrowedDataContract(&dpns_contract), + document_type_name: domain.name().clone(), + index_name: index_name.clone(), + index_values: vec![ + Value::Text("dash".to_string()), + Value::Text("quantum".to_string()), + ], + }, + result_type: DocumentsAndVoteTally, + offset: None, + limit: None, + start_at: None, + allow_include_locked_and_abstaining_vote_tally: true, + }; + + let (_root_hash, result) = resolved_contested_document_vote_poll_drive_query + .verify_vote_poll_vote_state_proof(proof.grovedb_proof.as_ref(), platform_version) + .expect("expected to verify proof"); + + let contenders = result.contenders; + assert_eq!(contenders.len(), 2); + + let first_contender = contenders.first().unwrap(); + + let second_contender = contenders.last().unwrap(); + + let first_contender_document = Document::from_bytes( + first_contender + .serialized_document() + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + let second_contender_document = Document::from_bytes( + second_contender + .serialized_document() + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + assert_ne!(first_contender_document, second_contender_document); + + assert_eq!(first_contender.identity_id(), identity_1.id()); + + assert_eq!(second_contender.identity_id(), identity_2.id()); + + assert_eq!(first_contender.vote_tally(), Some(0)); + + assert_eq!(second_contender.vote_tally(), Some(0)); + } + + #[test] + fn test_document_creation_on_contested_unique_index_should_fail_if_not_paying_for_it() { + let platform_version = PlatformVersion::latest(); + let platform_config = 
PlatformConfig { + network: Network::Dash, + ..Default::default() + }; + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .with_config(platform_config) + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity_1, signer_1, key_1) = + setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let dpns = platform.drive.cache.system_data_contracts.load_dpns(); + let dpns_contract = dpns.clone(); + + let preorder = dpns_contract + .document_type_for_name("preorder") + .expect("expected a profile document type"); + + assert!(!preorder.documents_mutable()); + assert!(preorder.documents_can_be_deleted()); + assert!(!preorder.documents_transferable().is_transferable()); + + let domain = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type"); + + assert!(!domain.documents_mutable()); + // Deletion is disabled with data trigger + assert!(domain.documents_can_be_deleted()); + assert!(domain.documents_transferable().is_transferable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document_1.set("parentDomainName", "dash".into()); + document_1.set("normalizedParentDomainName", "dash".into()); + document_1.set("label", "quantum".into()); + document_1.set("normalizedLabel", "quantum".into()); + document_1.set("records.identity", 
document_1.owner_id().into()); + document_1.set("subdomainRules.allowSubdomains", false.into()); + + let salt_1: [u8; 32] = rng.gen(); + + let mut salted_domain_buffer_1: Vec = vec![]; + salted_domain_buffer_1.extend(salt_1); + salted_domain_buffer_1.extend("quantum.dash".as_bytes()); + + let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); + + preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); + + document_1.set("preorderSalt", salt_1.into()); + + let documents_batch_create_preorder_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_1, + preorder, + entropy.0, + &key_1, + 2, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_1 = + documents_batch_create_preorder_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let owner_id = document_1.owner_id(); + let create_transition: DocumentCreateTransition = DocumentCreateTransitionV0 { + base: DocumentBaseTransition::from_document( + &document_1, + domain, + 3, + platform_version, + None, + ) + .expect("expected a base transition"), + entropy: entropy.0, + data: document_1.clone().properties_consumed(), + // Sending 0 balance that should not be valid + prefunded_voting_balance: None, + } + .into(); + let documents_batch_inner_create_transition_1: BatchTransition = BatchTransitionV0 { + owner_id, + transitions: vec![create_transition.into()], + user_fee_increase: 0, + signature_public_key_id: 0, + signature: Default::default(), + } + .into(); + let mut documents_batch_create_transition_1: StateTransition = + documents_batch_inner_create_transition_1.into(); + documents_batch_create_transition_1 + .sign_external(&key_1, &signer_1, Some(|_, _| Ok(SecurityLevel::HIGH))) + .expect("expected to sign"); + + let 
documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_preorder_transition_1.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 1); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition_1.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [PaidConsensusError( + ConsensusError::StateError(StateError::DocumentContestNotPaidForError(_)), + _ + )] + ); + + // Now let's run a query for the vote totals + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) + .expect("expected to encode the word dash"); + + let quantum_encoded = bincode::encode_to_vec(Value::Text("quantum".to_string()), config) + .expect("expected to encode the word quantum"); + + let index_name = "parentNameAndLabel".to_string(); + + let query_validation_result = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: 
Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: domain.name().clone(), + index_name: index_name.clone(), + index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: false, + start_at_identifier_info: None, + count: None, + prove: false, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = query_validation_result.version.expect("expected a version"); + + let Some( + get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( + get_contested_resource_vote_state_response_v0::ContestedResourceContenders { + contenders, + abstain_vote_tally, + lock_vote_tally, + finished_vote_info, + }, + ), + ) = result + else { + panic!("expected contenders") + }; + + assert_eq!(abstain_vote_tally, None); + + assert_eq!(lock_vote_tally, None); + + assert_eq!(finished_vote_info, None); + + assert_eq!(contenders.len(), 0); + + let drive_query = + DriveDocumentQuery::new_primary_key_single_item_query(&dpns, domain, document_1.id()); + + let documents = platform + .drive + .query_documents(drive_query, None, false, None, None) + .expect("expected to get back documents") + .documents_owned(); + + assert!(documents.first().is_none()); + } + + #[test] + fn test_document_creation_on_contested_unique_index_should_not_fail_if_not_paying_for_it_on_testnet_before_epoch_2080( + ) { + let platform_version = PlatformVersion::latest(); + let platform_config = PlatformConfig { + network: Testnet, + ..Default::default() + }; + let mut platform = TestPlatformBuilder::new() + 
.with_latest_protocol_version() + .with_config(platform_config) + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity_1, signer_1, key_1) = + setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let dpns = platform.drive.cache.system_data_contracts.load_dpns(); + let dpns_contract = dpns.clone(); + + let preorder = dpns_contract + .document_type_for_name("preorder") + .expect("expected a profile document type"); + + assert!(!preorder.documents_mutable()); + assert!(preorder.documents_can_be_deleted()); + assert!(!preorder.documents_transferable().is_transferable()); + + let domain = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type"); + + assert!(!domain.documents_mutable()); + // Deletion is disabled with data trigger + assert!(domain.documents_can_be_deleted()); + assert!(domain.documents_transferable().is_transferable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document_1.set("parentDomainName", "dash".into()); + document_1.set("normalizedParentDomainName", "dash".into()); + document_1.set("label", "quantum".into()); + document_1.set("normalizedLabel", "quantum".into()); + document_1.set("records.identity", document_1.owner_id().into()); + document_1.set("subdomainRules.allowSubdomains", false.into()); + + let salt_1: [u8; 32] = 
rng.gen(); + + let mut salted_domain_buffer_1: Vec = vec![]; + salted_domain_buffer_1.extend(salt_1); + salted_domain_buffer_1.extend("quantum.dash".as_bytes()); + + let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); + + preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); + + document_1.set("preorderSalt", salt_1.into()); + + let documents_batch_create_preorder_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_1, + preorder, + entropy.0, + &key_1, + 2, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_1 = + documents_batch_create_preorder_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let owner_id = document_1.owner_id(); + let create_transition: DocumentCreateTransition = DocumentCreateTransitionV0 { + base: DocumentBaseTransition::from_document( + &document_1, + domain, + 3, + platform_version, + None, + ) + .expect("expected a base transition"), + entropy: entropy.0, + data: document_1.clone().properties_consumed(), + prefunded_voting_balance: None, + } + .into(); + let documents_batch_inner_create_transition_1: BatchTransition = BatchTransitionV0 { + owner_id, + transitions: vec![create_transition.into()], + user_fee_increase: 0, + signature_public_key_id: 0, + signature: Default::default(), + } + .into(); + let mut documents_batch_create_transition_1: StateTransition = + documents_batch_inner_create_transition_1.into(); + documents_batch_create_transition_1 + .sign_external(&key_1, &signer_1, Some(|_, _| Ok(SecurityLevel::HIGH))) + .expect("expected to sign"); + + let documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = 
platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_preorder_transition_1.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 1); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition_1.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(..)] + ); + + // Now let's run a query for the vote totals + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) + .expect("expected to encode the word dash"); + + let quantum_encoded = bincode::encode_to_vec(Value::Text("quantum".to_string()), config) + .expect("expected to encode the word quantum"); + + let index_name = "parentNameAndLabel".to_string(); + + let query_validation_result = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: domain.name().clone(), + index_name: index_name.clone(), + index_values: 
vec![dash_encoded.clone(), quantum_encoded.clone()], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: false, + start_at_identifier_info: None, + count: None, + prove: false, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = query_validation_result.version.expect("expected a version"); + + let Some( + get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( + get_contested_resource_vote_state_response_v0::ContestedResourceContenders { + contenders, + abstain_vote_tally, + lock_vote_tally, + finished_vote_info, + }, + ), + ) = result + else { + panic!("expected contenders") + }; + + assert_eq!(abstain_vote_tally, None); + + assert_eq!(lock_vote_tally, None); + + assert_eq!(finished_vote_info, None); + + assert_eq!(contenders.len(), 0); // no contenders should have been created the document should just exist + + let drive_query = + DriveDocumentQuery::new_primary_key_single_item_query(&dpns, domain, document_1.id()); + + let documents = platform + .drive + .query_documents(drive_query, None, false, None, None) + .expect("expected to get back documents") + .documents_owned(); + + assert!(documents.first().is_some()); + } + + #[test] + fn test_document_creation_on_contested_unique_index_should_fail_if_reusing_entropy() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity_1, signer_1, key_1) = + setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) = + 
setup_identity(&mut platform, 93, dash_to_credits!(0.5)); + + let dpns = platform.drive.cache.system_data_contracts.load_dpns(); + let dpns_contract = dpns.clone(); + + let preorder = dpns_contract + .document_type_for_name("preorder") + .expect("expected a profile document type"); + + assert!(!preorder.documents_mutable()); + assert!(preorder.documents_can_be_deleted()); + assert!(!preorder.documents_transferable().is_transferable()); + + let domain = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type"); + + assert!(!domain.documents_mutable()); + // Deletion is disabled with data trigger + assert!(domain.documents_can_be_deleted()); + assert!(domain.documents_transferable().is_transferable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut preorder_document_2 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let new_entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_3_on_identity_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + new_entropy, //change entropy here + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) 
+ .expect("expected a random document"); + + let mut document_2 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_3_on_identity_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, //same entropy + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document_1.set("parentDomainName", "dash".into()); + document_1.set("normalizedParentDomainName", "dash".into()); + document_1.set("label", "quantum".into()); + document_1.set("normalizedLabel", "quantum".into()); + document_1.set("records.identity", document_1.owner_id().into()); + document_1.set("subdomainRules.allowSubdomains", false.into()); + + document_2.set("parentDomainName", "dash".into()); + document_2.set("normalizedParentDomainName", "dash".into()); + document_2.set("label", "quantum".into()); + document_2.set("normalizedLabel", "quantum".into()); + document_2.set("records.identity", document_2.owner_id().into()); + document_2.set("subdomainRules.allowSubdomains", false.into()); + + document_3_on_identity_1.set("parentDomainName", "dash".into()); + document_3_on_identity_1.set("normalizedParentDomainName", "dash".into()); + document_3_on_identity_1.set("label", "cry".into()); + document_3_on_identity_1.set("normalizedLabel", "cry".into()); + document_3_on_identity_1.set( + "records.identity", + document_3_on_identity_1.owner_id().into(), + ); + document_3_on_identity_1.set("subdomainRules.allowSubdomains", false.into()); + + let salt_1: [u8; 32] = rng.gen(); + let salt_2: [u8; 32] = rng.gen(); + let salt_3: [u8; 32] = rng.gen(); + + let mut salted_domain_buffer_1: Vec = vec![]; + salted_domain_buffer_1.extend(salt_1); + 
salted_domain_buffer_1.extend("quantum.dash".as_bytes()); + + let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); + + let mut salted_domain_buffer_2: Vec = vec![]; + salted_domain_buffer_2.extend(salt_2); + salted_domain_buffer_2.extend("quantum.dash".as_bytes()); + + let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); + + let mut salted_domain_buffer_3: Vec = vec![]; + salted_domain_buffer_3.extend(salt_3); + salted_domain_buffer_3.extend("cry.dash".as_bytes()); + + let salted_domain_hash_3 = hash_double(salted_domain_buffer_3); + + preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); + preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); + preorder_document_3_on_identity_1.set("saltedDomainHash", salted_domain_hash_3.into()); + + document_1.set("preorderSalt", salt_1.into()); + document_2.set("preorderSalt", salt_2.into()); + document_3_on_identity_1.set("preorderSalt", salt_3.into()); + + let documents_batch_create_preorder_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_1, + preorder, + entropy.0, + &key_1, + 2, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_1 = + documents_batch_create_preorder_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_preorder_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_2, + preorder, + entropy.0, + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_2 = + documents_batch_create_preorder_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + 
let documents_batch_create_preorder_transition_3 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_3_on_identity_1, + preorder, + new_entropy.0, + &key_1, + 3, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_3 = + documents_batch_create_preorder_transition_3 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + document_1, + domain, + entropy.0, + &key_1, + 4, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + document_2, + domain, + entropy.0, + &key_2, + 3, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_2 = documents_batch_create_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_3 = + BatchTransition::new_document_creation_transition_from_document( + document_3_on_identity_1, + domain, + entropy.0, + &key_1, + 5, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_3 = documents_batch_create_transition_3 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = 
platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_preorder_transition_1.clone(), + documents_batch_create_serialized_preorder_transition_2.clone(), + documents_batch_create_serialized_preorder_transition_3.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 3); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_transition_1.clone(), + documents_batch_create_serialized_transition_2.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 2); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition_3.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::DocumentContestDocumentWithSameIdAlreadyPresentError { .. 
} + ), + _ + )] + ); + + // Now let's run a query for the vote totals + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) + .expect("expected to encode the word dash"); + + let quantum_encoded = bincode::encode_to_vec(Value::Text("quantum".to_string()), config) + .expect("expected to encode the word quantum"); + + let index_name = "parentNameAndLabel".to_string(); + + let query_validation_result = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: domain.name().clone(), + index_name: index_name.clone(), + index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: false, + start_at_identifier_info: None, + count: None, + prove: false, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = query_validation_result.version.expect("expected a version"); + + let Some( + get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( + get_contested_resource_vote_state_response_v0::ContestedResourceContenders { + contenders, + abstain_vote_tally, + lock_vote_tally, + finished_vote_info, + }, + ), + ) = result + else { + panic!("expected contenders") + }; + + assert_eq!(abstain_vote_tally, None); + + assert_eq!(lock_vote_tally, None); + + assert_eq!(finished_vote_info, None); + + assert_eq!(contenders.len(), 2); + + let first_contender = contenders.first().unwrap(); 
+ + let second_contender = contenders.last().unwrap(); + + let first_contender_document = Document::from_bytes( + first_contender + .document + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + let second_contender_document = Document::from_bytes( + second_contender + .document + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + assert_ne!(first_contender_document, second_contender_document); + + assert_eq!(first_contender.identifier, identity_1.id().to_vec()); + + assert_eq!(second_contender.identifier, identity_2.id().to_vec()); + + assert_eq!(first_contender.vote_count, Some(0)); + + assert_eq!(second_contender.vote_count, Some(0)); + + let GetContestedResourceVoteStateResponse { version } = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: domain.name().clone(), + index_name: "parentNameAndLabel".to_string(), + index_values: vec![dash_encoded, quantum_encoded], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: true, + start_at_identifier_info: None, + count: None, + prove: true, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = version.expect("expected a version"); + + let Some(get_contested_resource_vote_state_response_v0::Result::Proof(proof)) = result + else { + panic!("expected contenders") + }; + + let resolved_contested_document_vote_poll_drive_query = + 
ResolvedContestedDocumentVotePollDriveQuery { + vote_poll: ContestedDocumentResourceVotePollWithContractInfoAllowBorrowed { + contract: DataContractResolvedInfo::BorrowedDataContract(&dpns_contract), + document_type_name: domain.name().clone(), + index_name: index_name.clone(), + index_values: vec![ + Value::Text("dash".to_string()), + Value::Text("quantum".to_string()), + ], + }, + result_type: DocumentsAndVoteTally, + offset: None, + limit: None, + start_at: None, + allow_include_locked_and_abstaining_vote_tally: true, + }; + + let (_root_hash, result) = resolved_contested_document_vote_poll_drive_query + .verify_vote_poll_vote_state_proof(proof.grovedb_proof.as_ref(), platform_version) + .expect("expected to verify proof"); + + let contenders = result.contenders; + assert_eq!(contenders.len(), 2); + + let first_contender = contenders.first().unwrap(); + + let second_contender = contenders.last().unwrap(); + + let first_contender_document = Document::from_bytes( + first_contender + .serialized_document() + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + let second_contender_document = Document::from_bytes( + second_contender + .serialized_document() + .as_ref() + .expect("expected a document") + .as_slice(), + domain, + platform_version, + ) + .expect("expected to get document"); + + assert_ne!(first_contender_document, second_contender_document); + + assert_eq!(first_contender.identity_id(), identity_1.id()); + + assert_eq!(second_contender.identity_id(), identity_2.id()); + + assert_eq!(first_contender.vote_tally(), Some(0)); + + assert_eq!(second_contender.vote_tally(), Some(0)); + } + + #[test] + fn test_that_a_contested_document_can_not_be_added_to_after_a_week() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let platform_state = 
platform.state.load(); + + let (contender_1, contender_2, dpns_contract) = create_dpns_identity_name_contest( + &mut platform, + &platform_state, + 7, + "quantum", + platform_version, + ); + + perform_votes_multi( + &mut platform, + dpns_contract.as_ref(), + vec![ + (TowardsIdentity(contender_1.id()), 50), + (TowardsIdentity(contender_2.id()), 5), + (ResourceVoteChoice::Abstain, 10), + (ResourceVoteChoice::Lock, 3), + ], + "quantum", + 10, + None, + platform_version, + ); + + let max_join_time = platform_version + .dpp + .validation + .voting + .allow_other_contenders_time_testing_ms; + + fast_forward_to_block(&platform, max_join_time / 2, 900, 42, 0, false); + + let platform_state = platform.state.load(); + + let _contender_3 = add_contender_to_dpns_name_contest( + &mut platform, + &platform_state, + 4, + "quantum", + None, // this should succeed, as we are under a week + platform_version, + ); + + let time_now = platform_version + .dpp + .validation + .voting + .allow_other_contenders_time_testing_ms + + 100; + + fast_forward_to_block(&platform, time_now, 900, 42, 0, false); //more than a week, less than 2 weeks + + let platform_state = platform.state.load(); + + // We expect this to fail + + let time_started = 0; + + let extra_time_used = 3000; // add_contender_to_dpns_name_contest uses this extra time + + let expected_error_message = format!( + "Document Contest for vote_poll ContestedDocumentResourceVotePoll {{ contract_id: GWRSAVFMjXx8HpQFaNJMqBV7MBgMK4br5UESsB4S31Ec, document_type_name: domain, index_name: parentNameAndLabel, index_values: [string dash, string quantum] }} is not joinable V0(ContestedDocumentVotePollStoredInfoV0 {{ finalized_events: [], vote_poll_status: Started(BlockInfo {{ time_ms: {}, height: 0, core_height: 0, epoch: 0 }}), locked_count: 0 }}), it started {} and it is now {}, and you can only join for {}", + time_started + extra_time_used, + time_started + extra_time_used, + time_now + extra_time_used, + max_join_time + ); + + let 
_contender_4 = add_contender_to_dpns_name_contest( + &mut platform, + &platform_state, + 9, + "quantum", + Some(expected_error_message.as_str()), // this should fail, as we are over a week + platform_version, + ); + } + + #[test] + fn test_that_a_contest_can_not_be_joined_twice_by_the_same_identity() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let platform_state = platform.state.load(); + + let ( + ( + _contender_1, + contender_1_signer, + contender_1_key, + _preorder_document_1, + (mut document_1, _entropy), + ), + (_contender_2, _, _, _, _), + dpns_contract, + ) = create_dpns_name_contest_give_key_info( + &mut platform, + &platform_state, + 7, + "quantum", + platform_version, + ); + + let domain = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type"); + + let mut rng = StdRng::seed_from_u64(89); + + let different_entropy = Bytes32::random_with_rng(&mut rng); + + document_1.set_id(Document::generate_document_id_v0( + dpns_contract.id_ref(), + document_1.owner_id_ref(), + domain.name(), + different_entropy.as_slice(), + )); + + let documents_batch_create_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + document_1, + domain, + different_entropy.0, + &contender_1_key, + 4, + 0, + &contender_1_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition_1.clone()], + &platform_state, + &BlockInfo::default_with_time( + 
&platform_state + .last_committed_block_time_ms() + .unwrap_or_default() + + 3000, + ), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let result = processing_result.into_execution_results().remove(0); + + let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result else { + panic!("expected a paid consensus error"); + }; + assert_eq!(consensus_error.to_string(), "An Identity with the id BjNejy4r9QAvLHpQ9Yq6yRMgNymeGZ46d48fJxJbMrfW is already a contestant for the vote_poll ContestedDocumentResourceVotePoll { contract_id: GWRSAVFMjXx8HpQFaNJMqBV7MBgMK4br5UESsB4S31Ec, document_type_name: domain, index_name: parentNameAndLabel, index_values: [string dash, string quantum] }"); + } + + #[test] + fn test_that_a_contested_document_can_not_be_added_if_we_are_locked() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let platform_state = platform.state.load(); + + let (contender_1, contender_2, dpns_contract) = create_dpns_identity_name_contest( + &mut platform, + &platform_state, + 7, + "quantum", + platform_version, + ); + + perform_votes_multi( + &mut platform, + dpns_contract.as_ref(), + vec![ + (TowardsIdentity(contender_1.id()), 3), + (TowardsIdentity(contender_2.id()), 5), + (ResourceVoteChoice::Abstain, 8), + (ResourceVoteChoice::Lock, 10), + ], + "quantum", + 10, + None, + platform_version, + ); + + fast_forward_to_block( + &platform, + platform_version + .dpp + .validation + .voting + .allow_other_contenders_time_testing_ms + / 2, + 900, + 42, + 0, + false, + ); // a time when others can join + + let platform_state = platform.state.load(); + + let _contender_3 = add_contender_to_dpns_name_contest( + &mut platform, + 
&platform_state, + 4, + "quantum", + None, // this should succeed, as we are under the `platform_version.dpp.validation.voting.allow_other_contenders_time_testing_ms` + platform_version, + ); + + let time_after_distribution_limit = platform_version + .dpp + .voting_versions + .default_vote_poll_time_duration_test_network_ms + + 10_000; // add 10s (3 seconds is used by create_dpns_identity_name_contest) + + fast_forward_to_block(&platform, time_after_distribution_limit, 900, 42, 0, false); // after distribution + + let platform_state = platform.state.load(); + + let transaction = platform.drive.grove.start_transaction(); + + platform + .check_for_ended_vote_polls( + &platform_state, + &platform_state, + &BlockInfo { + time_ms: time_after_distribution_limit, + height: 900, + core_height: 42, + epoch: Default::default(), + }, + Some(&transaction), + platform_version, + ) + .expect("expected to check for ended vote polls"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let platform_state = platform.state.load(); + + // We expect this to fail + + let expected_error_message = format!( + "Document Contest for vote_poll ContestedDocumentResourceVotePoll {{ contract_id: GWRSAVFMjXx8HpQFaNJMqBV7MBgMK4br5UESsB4S31Ec, document_type_name: domain, index_name: parentNameAndLabel, index_values: [string dash, string quantum] }} is currently already locked V0(ContestedDocumentVotePollStoredInfoV0 {{ finalized_events: [ContestedDocumentVotePollStoredInfoVoteEventV0 {{ resource_vote_choices: [FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: TowardsIdentity(BjNejy4r9QAvLHpQ9Yq6yRMgNymeGZ46d48fJxJbMrfW), voters: [2oGomAQc47V9h3mkpyHUPbF74gT2AmoYKg1oSb94Rbwm:1, 4iroeiNBeBYZetCt21kW7FGyczE8WqoqzZ48YAHwyV7R:1, Cdf8V4KGHHd395x5xPJPPrzTKwmp5MqbuszSE2iMzzeP:1] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: TowardsIdentity(FiLk5pGtspYtF65PKsQq3YFr1DEiXPHTZeKjusT6DuqN), 
voters: [] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: TowardsIdentity(Fv8S6kTbNrRqKC7PR7XcRUoPR59bxNhhggg5mRaNN6ow), voters: [4MK8GWEWX1PturUqjZJefdE4WGrUqz1UQZnbK17ENkeA:1, 5gRudU7b4n8LYkNvhZomv6FtMrP7gvaTvRrHKfaTS22K:1, AfzQBrdwzDuTVdXrMWqQyVvXRWqPMDVjA76hViuGLh6W:1, E75wdFZB22P1uW1wJBJGPgXZuZKLotK7YmbH5wUk5msH:1, G3ZfS2v39x6FuLGnnJ1RNQyy4zn4Wb64KiGAjqj39wUu:1] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: Abstain, voters: [5Ur8tDxJnatfUd9gcVFDde7ptHydujZzJLNTxa6aMYYy:1, 93Gsg14oT9K4FLYmC7N26uS4g5b7JcM1GwGEDeJCCBPJ:1, 96eX4PTjbXRuGHuMzwXdptWFtHcboXbtevk51Jd73pP7:1, AE9xm2mbemDeMxPUzyt35Agq1axRxggVfV4DRLAZp7Qt:1, FbLyu5d7JxEsvSsujj7Wopg57Wrvz9HH3UULCusKpBnF:1, GsubMWb3LH1skUJrcxTmZ7wus1habJcbpb8su8yBVqFY:1, H9UrL7aWaxDmXhqeGMJy7LrGdT2wWb45mc7kQYsoqwuf:1, Hv88mzPZVKq2fnjoUqK56vjzkcmqRHpWE1ME4z1MXDrw:1] }}, FinalizedResourceVoteChoicesWithVoterInfo {{ resource_vote_choice: Lock, voters: [F1oA8iAoyJ8dgCAi2GSPqcNhp9xEuAqhP47yXBDw5QR:1, 2YSjsJUp74MJpm12rdn8wyPR5MY3c322pV8E8siw989u:1, 3fQrmN4PWhthUFnCFTaJqbT2PPGf7MytAyik4eY1DP8V:1, 7r7gnAiZunVLjtSd5ky4yvPpnWTFYbJuQAapg8kDCeNK:1, 86TUE89xNkBDcmshXRD198xjAvMmKecvHbwo6i83AmqA:1, 97iYr4cirPdG176kqa5nvJWT9tsnqxHmENfRnZUgM6SC:1, 99nKfYZL4spsTe9p9pPNhc1JWv9yq4CbPPMPm87a5sgn:1, BYAqFxCVwMKrw5YAQMCFQGiAF2v3YhKRm2EdGfgkYN9G:1, CGKeK3AfdZUxXF3qH9zxp5MR7Z4WvDVqMrU5wjMKqT5C:1, HRPPEX4mdoZAMkg6NLJUgDzN4pSTpiDXEAGcR5JBdiXX:1] }}], start_block: BlockInfo {{ time_ms: 3000, height: 0, core_height: 0, epoch: 0 }}, finalization_block: BlockInfo {{ time_ms: {}, height: 900, core_height: 42, epoch: 0 }}, winner: Locked }}], vote_poll_status: Locked, locked_count: 1 }}), unlocking is possible by paying 400000000000 credits", + time_after_distribution_limit + ); + + let _contender_4 = add_contender_to_dpns_name_contest( + &mut platform, + &platform_state, + 9, + "quantum", + Some(expected_error_message.as_str()), // this should fail, as it is locked + platform_version, + ); + } + + #[test] + 
fn test_document_creation_on_restricted_document_type_that_only_allows_contract_owner_to_create( + ) { + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_initial_state_structure(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (another_identity, another_identity_signer, another_identity_key) = + setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_game_path = "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase-creation-restricted-to-owner.json"; + + let platform_state = platform.state.load(); + let platform_version = platform_state + .current_platform_version() + .expect("expected to get current platform version"); + + // let's construct the grovedb structure for the card game data contract + let mut contract = json_document_to_contract(card_game_path, true, platform_version) + .expect("expected to get data contract"); + + contract.set_owner_id(identity.id()); + + platform + .drive + .apply_contract( + &contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert_eq!( + card_document_type.creation_restriction_mode(), + CreationRestrictionMode::OwnerOnly + ); + + let mut rng = StdRng::seed_from_u64(433); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + 
BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + // There is no issue because the creator of the contract made the document + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + // Now let's try for another identity + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + another_identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 8.into()); + document.set("defense", 2.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &another_identity_key, + 2, + 0, + &another_identity_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let 
transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + // There is no issue because the creator of the contract made the document + + assert_eq!(processing_result.invalid_paid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let result = processing_result.into_execution_results().remove(0); + + let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result else { + panic!("expected a paid consensus error"); + }; + assert_eq!(consensus_error.to_string(), "Document Creation on 86LHvdC1Tqx5P97LQUSibGFqf2vnKFpB6VkqQ7oso86e:card is not allowed because of the document type's creation restriction mode Owner Only"); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/deletion.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/deletion.rs new file mode 100644 index 00000000000..5b5aed9c0f0 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/deletion.rs @@ -0,0 +1,746 @@ +use super::*; + +mod deletion_tests { + use super::*; + + #[test] + fn test_document_delete_on_document_type_that_is_mutable_and_can_be_deleted() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, 
dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + assert!(profile.documents_can_be_deleted()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let 
documents_batch_deletion_transition = + BatchTransition::new_document_deletion_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition = documents_batch_deletion_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 1711420); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_document_delete_on_document_type_that_is_mutable_and_can_not_be_deleted() { + let mut platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure(); + + let contract_path = "tests/supporting_files/contract/dashpay/dashpay-contract-contact-request-mutable-and-can-not-be-deleted.json"; + + let platform_state = platform.state.load(); + let platform_version = platform_state + .current_platform_version() 
+ .expect("expected to get current platform version"); + + // let's construct the grovedb structure for the card game data contract + let dashpay_contract = json_document_to_contract(contract_path, true, platform_version) + .expect("expected to get data contract"); + platform + .drive + .apply_contract( + &dashpay_contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); + + let mut rng = StdRng::seed_from_u64(437); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (other_identity, ..) = setup_identity(&mut platform, 495, dash_to_credits!(0.1)); + + let contact_request_document_type = dashpay_contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type"); + + assert!(contact_request_document_type.documents_mutable()); + + assert!(!contact_request_document_type.documents_can_be_deleted()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = contact_request_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set( + "toUserId", + Value::Identifier(other_identity.id().to_buffer()), + ); + document.set("recipientKeyIndex", Value::U32(1)); + document.set("senderKeyIndex", Value::U32(1)); + document.set("accountReference", Value::U32(0)); + + let mut altered_document = document.clone(); + + altered_document.set_revision(Some(1)); + altered_document.set("senderKeyIndex", Value::U32(2)); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + contact_request_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + 
platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let documents_batch_deletion_transition = + BatchTransition::new_document_deletion_transition_from_document( + altered_document, + contact_request_document_type, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_deletion_serialized_transition = documents_batch_deletion_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_deletion_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + 
assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); + } + + #[test] + fn test_document_delete_on_document_type_that_is_not_mutable_and_can_be_deleted() { + let mut platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure(); + + let contract_path = "tests/supporting_files/contract/dashpay/dashpay-contract-contact-request-not-mutable-and-can-be-deleted.json"; + + let platform_state = platform.state.load(); + let platform_version = platform_state + .current_platform_version() + .expect("expected to get current platform version"); + + // let's construct the grovedb structure for the card game data contract + let dashpay_contract = json_document_to_contract(contract_path, true, platform_version) + .expect("expected to get data contract"); + platform + .drive + .apply_contract( + &dashpay_contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); + + let mut rng = StdRng::seed_from_u64(437); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (other_identity, ..) 
= setup_identity(&mut platform, 495, dash_to_credits!(0.1)); + + let contact_request_document_type = dashpay_contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type"); + + assert!(!contact_request_document_type.documents_mutable()); + + assert!(contact_request_document_type.documents_can_be_deleted()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = contact_request_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set( + "toUserId", + Value::Identifier(other_identity.id().to_buffer()), + ); + document.set("recipientKeyIndex", Value::U32(1)); + document.set("senderKeyIndex", Value::U32(1)); + document.set("accountReference", Value::U32(0)); + + let mut altered_document = document.clone(); + + altered_document.set_revision(Some(1)); + altered_document.set("senderKeyIndex", Value::U32(2)); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + contact_request_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform 
+ .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let documents_batch_deletion_transition = + BatchTransition::new_document_deletion_transition_from_document( + altered_document, + contact_request_document_type, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_deletion_serialized_transition = documents_batch_deletion_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_deletion_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2762400); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_document_delete_on_document_type_that_is_not_mutable_and_can_not_be_deleted() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = 
StdRng::seed_from_u64(437); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (other_identity, ..) = setup_identity(&mut platform, 495, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let contact_request_document_type = dashpay_contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type"); + + assert!(!contact_request_document_type.documents_mutable()); + + assert!(!contact_request_document_type.documents_can_be_deleted()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = contact_request_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set( + "toUserId", + Value::Identifier(other_identity.id().to_buffer()), + ); + document.set("recipientKeyIndex", Value::U32(1)); + document.set("senderKeyIndex", Value::U32(1)); + document.set("accountReference", Value::U32(0)); + + let mut altered_document = document.clone(); + + altered_document.set_revision(Some(1)); + altered_document.set("senderKeyIndex", Value::U32(2)); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + contact_request_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + 
.platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let documents_batch_deletion_transition = + BatchTransition::new_document_deletion_transition_from_document( + altered_document, + contact_request_document_type, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_deletion_serialized_transition = documents_batch_deletion_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_deletion_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); + } + + #[test] + fn test_document_delete_that_does_not_yet_exist() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = 
platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let documents_batch_delete_transition = + BatchTransition::new_document_deletion_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_delete_serialized_transition = documents_batch_delete_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_delete_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + 
assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 516040); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/dpns.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/dpns.rs new file mode 100644 index 00000000000..284642a9c2b --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/dpns.rs @@ -0,0 +1,917 @@ +use super::*; + +mod dpns_tests { + use super::*; + use crate::execution::validation::state_transition::tests::setup_identity; + use crate::test::helpers::setup::TestPlatformBuilder; + use dpp::dash_to_credits; + use dpp::data_contract::document_type::random_document::{ + DocumentFieldFillSize, DocumentFieldFillType, + }; + use dpp::data_contract::DataContract; + use dpp::platform_value::Bytes32; + use dpp::state_transition::batch_transition::BatchTransition; + use dpp::util::hash::hash_double; + use drive::query::{InternalClauses, OrderClause, WhereClause, WhereOperator}; + use drive::util::test_helpers::setup_contract; + use indexmap::IndexMap; + use platform_version::version::PlatformVersion; + use rand::prelude::StdRng; + use std::collections::BTreeMap; + + #[test] + fn test_dpns_contract_references_with_no_contested_unique_index() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity_1, signer_1, key_1) = + setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) = + setup_identity(&mut platform, 93, dash_to_credits!(0.5)); + + let (identity_3, signer_3, key_3) 
= + setup_identity(&mut platform, 98, dash_to_credits!(0.5)); + + let dashpay_contract = setup_contract( + &platform.drive, + "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", + None, + None, + None::, + None, + None, + ); + + let card_game = setup_contract( + &platform.drive, + "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase.json", + None, + None, + None::, + None, + None, + ); + + let dpns_contract = setup_contract( + &platform.drive, + "tests/supporting_files/contract/dpns/dpns-contract-contested-unique-index-with-contract-id.json", + None, + None, + None::, + None, + None, + ); + + let preorder = dpns_contract + .document_type_for_name("preorder") + .expect("expected a profile document type"); + + assert!(!preorder.documents_mutable()); + assert!(preorder.documents_can_be_deleted()); + assert!(!preorder.documents_transferable().is_transferable()); + + let domain = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type"); + + assert!(!domain.documents_mutable()); + // Deletion is disabled with data trigger + assert!(domain.documents_can_be_deleted()); + assert!(domain.documents_transferable().is_transferable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut preorder_document_2 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut preorder_document_3 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_3.id(), + entropy, + 
DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_2 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_3 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_3.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document_1.set("parentDomainName", "dash".into()); + document_1.set("normalizedParentDomainName", "dash".into()); + document_1.set("label", "quantum123".into()); + document_1.set("normalizedLabel", "quantum123".into()); + document_1.set("records.contract", dashpay_contract.id().into()); + document_1.set("subdomainRules.allowSubdomains", false.into()); + + document_2.set("parentDomainName", "dash".into()); + document_2.set("normalizedParentDomainName", "dash".into()); + document_2.set("label", "van89".into()); + document_2.set("normalizedLabel", "van89".into()); + document_2.set("records.contract", card_game.id().into()); + document_2.set("subdomainRules.allowSubdomains", false.into()); + + document_3.set("parentDomainName", "dash".into()); + document_3.set("normalizedParentDomainName", "dash".into()); + document_3.set("label", "jazz65".into()); + document_3.set("normalizedLabel", "jazz65".into()); + document_3.set("records.identity", document_3.owner_id().into()); + 
document_3.set("subdomainRules.allowSubdomains", false.into()); + + let salt_1: [u8; 32] = rng.gen(); + let salt_2: [u8; 32] = rng.gen(); + let salt_3: [u8; 32] = rng.gen(); + + let mut salted_domain_buffer_1: Vec = vec![]; + salted_domain_buffer_1.extend(salt_1); + salted_domain_buffer_1.extend("quantum123.dash".as_bytes()); + + let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); + + let mut salted_domain_buffer_2: Vec = vec![]; + salted_domain_buffer_2.extend(salt_2); + salted_domain_buffer_2.extend("van89.dash".as_bytes()); + + let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); + + let mut salted_domain_buffer_3: Vec = vec![]; + salted_domain_buffer_3.extend(salt_3); + salted_domain_buffer_3.extend("jazz65.dash".as_bytes()); + + let salted_domain_hash_3 = hash_double(salted_domain_buffer_3); + + preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); + preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); + preorder_document_3.set("saltedDomainHash", salted_domain_hash_3.into()); + + document_1.set("preorderSalt", salt_1.into()); + document_2.set("preorderSalt", salt_2.into()); + document_3.set("preorderSalt", salt_3.into()); + + let documents_batch_create_preorder_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_1, + preorder, + entropy.0, + &key_1, + 2, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_1 = + documents_batch_create_preorder_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_preorder_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_2, + preorder, + entropy.0, + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to 
create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_2 = + documents_batch_create_preorder_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_preorder_transition_3 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_3, + preorder, + entropy.0, + &key_3, + 2, + 0, + &signer_3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_3 = + documents_batch_create_preorder_transition_3 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + document_1, + domain, + entropy.0, + &key_1, + 3, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + document_2, + domain, + entropy.0, + &key_2, + 3, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_2 = documents_batch_create_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_3 = + BatchTransition::new_document_creation_transition_from_document( + document_3.clone(), + domain, + entropy.0, + &key_3, + 3, + 0, + &signer_3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + 
+ let documents_batch_create_serialized_transition_3 = documents_batch_create_transition_3 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_preorder_transition_1.clone(), + documents_batch_create_serialized_preorder_transition_2.clone(), + documents_batch_create_serialized_preorder_transition_3.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 3); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_transition_1.clone(), + documents_batch_create_serialized_transition_2.clone(), + documents_batch_create_serialized_transition_3.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 3); + + let mut order_by = IndexMap::new(); + + order_by.insert( + "records.identity".to_string(), + OrderClause { + field: "records.identity".to_string(), + ascending: true, + }, + ); + + let drive_query = DriveDocumentQuery { + contract: &dpns_contract, + document_type: domain, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: None, + range_clause: Some(WhereClause { + field: 
"records.identity".to_string(), + operator: WhereOperator::LessThanOrEquals, + value: Value::Bytes32([255; 32]), + }), + equal_clauses: Default::default(), + }, + offset: None, + limit: None, + order_by, + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let documents = platform + .drive + .query_documents(drive_query, None, false, None, None) + .expect("expected to get back documents") + .documents_owned(); + + let transient_fields = domain + .transient_fields() + .iter() + .map(|a| a.as_str()) + .collect(); + + assert!(documents + .get(0) + .expect("expected a document") + .is_equal_ignoring_time_based_fields( + &document_3, + Some(transient_fields), + platform_version + ) + .expect("expected to run is equal")); + + let drive_query = DriveDocumentQuery { + contract: &dpns_contract, + document_type: domain, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: None, + range_clause: None, + equal_clauses: BTreeMap::from([( + "records.identity".to_string(), + WhereClause { + field: "records.identity".to_string(), + operator: WhereOperator::Equal, + value: Value::Null, + }, + )]), + }, + offset: None, + limit: None, + order_by: Default::default(), + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let documents = platform + .drive + .query_documents(drive_query, None, false, None, None) + .expect("expected to get back documents") + .documents_owned(); + + // This is normal because we set that we could not query on null + assert_eq!(documents.len(), 0); + } + + #[test] + fn test_dpns_contract_references_with_no_contested_unique_index_null_searchable_true() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity_1, 
signer_1, key_1) = + setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) = + setup_identity(&mut platform, 93, dash_to_credits!(0.5)); + + let (identity_3, signer_3, key_3) = + setup_identity(&mut platform, 98, dash_to_credits!(0.5)); + + let dashpay_contract = setup_contract( + &platform.drive, + "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", + None, + None, + None::, + None, + None, + ); + + let card_game = setup_contract( + &platform.drive, + "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase.json", + None, + None, + None::, + None, + None, + ); + + let dpns_contract = setup_contract( + &platform.drive, + "tests/supporting_files/contract/dpns/dpns-contract-contested-unique-index-with-contract-id-null-searchable-true.json", + None, + None, + None::, + None, + None, + ); + + let preorder = dpns_contract + .document_type_for_name("preorder") + .expect("expected a profile document type"); + + assert!(!preorder.documents_mutable()); + assert!(preorder.documents_can_be_deleted()); + assert!(!preorder.documents_transferable().is_transferable()); + + let domain = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type"); + + assert!(!domain.documents_mutable()); + // Deletion is disabled with data trigger + assert!(domain.documents_can_be_deleted()); + assert!(domain.documents_transferable().is_transferable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut preorder_document_1 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut preorder_document_2 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + 
DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut preorder_document_3 = preorder + .random_document_with_identifier_and_entropy( + &mut rng, + identity_3.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_1 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_1.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_2 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_2.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + let mut document_3 = domain + .random_document_with_identifier_and_entropy( + &mut rng, + identity_3.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document_1.set("parentDomainName", "dash".into()); + document_1.set("normalizedParentDomainName", "dash".into()); + document_1.set("label", "quantum123".into()); + document_1.set("normalizedLabel", "quantum123".into()); + document_1.set("records.contract", dashpay_contract.id().into()); + document_1.set("subdomainRules.allowSubdomains", false.into()); + + document_2.set("parentDomainName", "dash".into()); + document_2.set("normalizedParentDomainName", "dash".into()); + document_2.set("label", "van89".into()); + document_2.set("normalizedLabel", "van89".into()); + document_2.set("records.contract", card_game.id().into()); + document_2.set("subdomainRules.allowSubdomains", false.into()); + + document_3.set("parentDomainName", "dash".into()); + 
document_3.set("normalizedParentDomainName", "dash".into()); + document_3.set("label", "jazz65".into()); + document_3.set("normalizedLabel", "jazz65".into()); + document_3.set("records.identity", document_3.owner_id().into()); + document_3.set("subdomainRules.allowSubdomains", false.into()); + + let salt_1: [u8; 32] = rng.gen(); + let salt_2: [u8; 32] = rng.gen(); + let salt_3: [u8; 32] = rng.gen(); + + let mut salted_domain_buffer_1: Vec = vec![]; + salted_domain_buffer_1.extend(salt_1); + salted_domain_buffer_1.extend("quantum123.dash".as_bytes()); + + let salted_domain_hash_1 = hash_double(salted_domain_buffer_1); + + let mut salted_domain_buffer_2: Vec = vec![]; + salted_domain_buffer_2.extend(salt_2); + salted_domain_buffer_2.extend("van89.dash".as_bytes()); + + let salted_domain_hash_2 = hash_double(salted_domain_buffer_2); + + let mut salted_domain_buffer_3: Vec = vec![]; + salted_domain_buffer_3.extend(salt_3); + salted_domain_buffer_3.extend("jazz65.dash".as_bytes()); + + let salted_domain_hash_3 = hash_double(salted_domain_buffer_3); + + preorder_document_1.set("saltedDomainHash", salted_domain_hash_1.into()); + preorder_document_2.set("saltedDomainHash", salted_domain_hash_2.into()); + preorder_document_3.set("saltedDomainHash", salted_domain_hash_3.into()); + + document_1.set("preorderSalt", salt_1.into()); + document_2.set("preorderSalt", salt_2.into()); + document_3.set("preorderSalt", salt_3.into()); + + let documents_batch_create_preorder_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_1, + preorder, + entropy.0, + &key_1, + 2, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_1 = + documents_batch_create_preorder_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let 
documents_batch_create_preorder_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_2, + preorder, + entropy.0, + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_2 = + documents_batch_create_preorder_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_preorder_transition_3 = + BatchTransition::new_document_creation_transition_from_document( + preorder_document_3, + preorder, + entropy.0, + &key_3, + 2, + 0, + &signer_3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_preorder_transition_3 = + documents_batch_create_preorder_transition_3 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_1 = + BatchTransition::new_document_creation_transition_from_document( + document_1, + domain, + entropy.0, + &key_1, + 3, + 0, + &signer_1, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_1 = documents_batch_create_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_create_transition_2 = + BatchTransition::new_document_creation_transition_from_document( + document_2, + domain, + entropy.0, + &key_2, + 3, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_2 = documents_batch_create_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let 
documents_batch_create_transition_3 = + BatchTransition::new_document_creation_transition_from_document( + document_3.clone(), + domain, + entropy.0, + &key_3, + 3, + 0, + &signer_3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition_3 = documents_batch_create_transition_3 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_preorder_transition_1.clone(), + documents_batch_create_serialized_preorder_transition_2.clone(), + documents_batch_create_serialized_preorder_transition_3.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 3); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_create_serialized_transition_1.clone(), + documents_batch_create_serialized_transition_2.clone(), + documents_batch_create_serialized_transition_3.clone(), + ], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 3); + + let mut order_by = IndexMap::new(); + + order_by.insert( + "records.identity".to_string(), + OrderClause { + field: "records.identity".to_string(), + ascending: true, + }, 
+ ); + + let drive_query = DriveDocumentQuery { + contract: &dpns_contract, + document_type: domain, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: None, + range_clause: Some(WhereClause { + field: "records.identity".to_string(), + operator: WhereOperator::LessThanOrEquals, + value: Value::Bytes32([255; 32]), + }), + equal_clauses: Default::default(), + }, + offset: None, + limit: None, + order_by, + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let documents = platform + .drive + .query_documents(drive_query, None, false, None, None) + .expect("expected to get back documents") + .documents_owned(); + + // here we will get all 3 documents + assert_eq!(documents.len(), 3); + + let drive_query = DriveDocumentQuery { + contract: &dpns_contract, + document_type: domain, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: None, + range_clause: None, + equal_clauses: BTreeMap::from([( + "records.identity".to_string(), + WhereClause { + field: "records.identity".to_string(), + operator: WhereOperator::Equal, + value: Value::Null, + }, + )]), + }, + offset: None, + limit: None, + order_by: Default::default(), + start_at: None, + start_at_included: false, + block_time_ms: None, + }; + + let documents = platform + .drive + .query_documents(drive_query, None, false, None, None) + .expect("expected to get back documents") + .documents_owned(); + + assert_eq!(documents.len(), 2); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/mod.rs new file mode 100644 index 00000000000..8af767d18ac --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/mod.rs @@ -0,0 +1,8 @@ 
+mod creation; +mod deletion; +mod dpns; +mod nft; +mod replacement; +mod transfer; + +use super::*; diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/nft.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/nft.rs new file mode 100644 index 00000000000..bc195f9a862 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/nft.rs @@ -0,0 +1,2802 @@ +use super::*; + +mod nft_tests { + use super::*; + use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; + #[test] + fn test_document_set_price_on_document_without_ability_to_purchase() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_transfer_only(Transferable::Always); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + 
) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction 
= platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let result = processing_result.into_execution_results().remove(0); + + let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result else { + panic!("expected a paid consensus error"); + }; + assert_eq!(consensus_error.to_string(), "Document transition action card is in trade mode No Trading that does not support the seller setting the price is not supported"); + } + + #[test] + fn test_document_set_price() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, _, _) = setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + 
document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + 
false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none 
+ assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // The sender document should have the desired price + + let document = query_sender_results.documents().first().unwrap(); + + let price: Credits = document + .properties() + .get_integer("$price") + .expect("expected to get back price"); + + assert_eq!(dash_to_credits!(0.1), price); + + assert_eq!(document.revision(), Some(2)); + } + + #[test] + fn test_document_set_price_and_purchase() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (purchaser, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(1.0)); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + assert_eq!(seller_balance, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + 
BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!( + processing_result + .aggregated_fees() + .clone() + .into_balance_change(identity.id()) + .change(), + &BalanceChange::RemoveFromBalance { + required_removed_balance: 123579000, + desired_removed_balance: 126435860, + } + ); + + let original_creation_cost = 126435860; + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost + ); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let 
receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", purchaser.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + 
assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 216000); // we added 8 bytes for the price + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + None + ); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost - 2689880 + ); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // The sender document should have the desired price + + let mut document = query_sender_results.documents_owned().remove(0); + + let price: Credits = document + .properties() + .get_integer("$price") + .expect("expected to get back price"); + + assert_eq!(dash_to_credits!(0.1), price); + + // At this point we want to have the receiver purchase the document + + document.set_revision(Some(3)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + 
document.clone(), + card_document_type, + purchaser.id(), + dash_to_credits!(0.1), //same price as requested + &recipient_key, + 1, // 1 because he's never done anything + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 64611000); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 4080480); + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + Some(22704503) + ); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to have no documents, and the receiver to have 1 + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 1); + 
+ let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.2) - original_creation_cost + 20014623 + ); + + let buyers_balance = platform + .drive + .fetch_identity_balance(purchaser.id().to_buffer(), None, platform_version) + .expect("expected to get purchaser balance") + .expect("expected that purchaser exists"); + + // the buyer paid 0.1, but also storage and processing fees + assert_eq!(buyers_balance, dash_to_credits!(0.9) - 68691480); + } + + #[test] + fn test_document_set_price_and_purchase_different_epoch_documents_mutable() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure(); + + let card_game_path = "tests/supporting_files/contract/crypto-card-game/crypto-card-game-direct-purchase-documents-mutable.json"; + + // let's construct the grovedb structure for the card game data contract + let contract = json_document_to_contract(card_game_path, true, platform_version) + .expect("expected to get data contract"); + platform + .drive + .apply_contract( + &contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (purchaser, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(1.0)); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected 
to get identity balance") + .expect("expected that identity exists"); + + assert_eq!(seller_balance, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!( + processing_result + .aggregated_fees() + .clone() + .into_balance_change(identity.id()) + .change(), + &BalanceChange::RemoveFromBalance { + required_removed_balance: 138159000, + desired_removed_balance: 141234660, + } + ); + + let original_creation_cost = 141234660; + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to 
commit transaction"); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost + ); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", purchaser.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // now let's modify the document + + fast_forward_to_block(&platform, 500_000, 100, 3, 1, false); //next epoch + + document.set("description", "chopsticks".into()); + document.bump_revision(); + + let documents_batch_update_transition = + BatchTransition::new_document_replacement_transition_from_document( + document.clone(), + card_document_type, + &key, + 3, + 0, + &signer, + platform_version, + 
None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition = documents_batch_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let platform_state = platform.state.load(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!( + processing_result.invalid_paid_count(), + 0, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!( + processing_result.invalid_unpaid_count(), + 0, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!( + processing_result.valid_count(), + 1, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 378000); + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + None + ); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2717400); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost - 2717400 - 378000 + ); + + // now let's update price, but first go to next epoch + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 2, false); 
//next epoch + + document.bump_revision(); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 4, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!( + processing_result.invalid_paid_count(), + 0, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!( + processing_result.invalid_unpaid_count(), + 0, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 216000); // we added 8 bytes for the price + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + None + ); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2721160); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and 
storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost - 2717400 - 378000 - 2721160 - 216000 + ); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // The sender document should have the desired price + + let mut document = query_sender_results.documents_owned().remove(0); + + let price: Credits = document + .properties() + .get_integer("$price") + .expect("expected to get back price"); + + assert_eq!(dash_to_credits!(0.1), price); + + // At this point we want to have the receiver purchase the document at the next epoch + + fast_forward_to_block(&platform, 1_700_000_000, 1200, 42, 3, false); //next epoch + + document.bump_revision(); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), + card_document_type, + purchaser.id(), + dash_to_credits!(0.1), //same price as requested + &recipient_key, + 1, // 1 because he's never done anything + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!( + processing_result.invalid_paid_count(), + 0, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!( + processing_result.invalid_unpaid_count(), + 0, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!( + processing_result.valid_count(), + 1, + "{:?}", + processing_result.execution_results() + ); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 64611000); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 4345280); + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + Some(52987722) + ); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to have no documents, and the receiver to have 1 + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 1); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.2) - original_creation_cost + 46955162 + ); + + let buyers_balance = platform + .drive + 
.fetch_identity_balance(purchaser.id().to_buffer(), None, platform_version) + .expect("expected to get purchaser balance") + .expect("expected that purchaser exists"); + + // the buyer paid 0.1, but also storage and processing fees + assert_eq!(buyers_balance, dash_to_credits!(0.9) - 68956280); + } + + #[test] + fn test_document_set_price_and_purchase_different_epoch() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (purchaser, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(1.0)); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + assert_eq!(seller_balance, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, 
+ ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!( + processing_result + .aggregated_fees() + .clone() + .into_balance_change(identity.id()) + .change(), + &BalanceChange::RemoveFromBalance { + required_removed_balance: 123579000, + desired_removed_balance: 126435860, + } + ); + + let original_creation_cost = 126435860; + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost + ); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", purchaser.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + 
Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // now let's update price, but first go to next epoch + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + 
assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 216000); // we added 8 bytes for the price + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + None + ); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.1) - original_creation_cost - 2689880 + ); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // The sender document should have the desired price + + let mut document = query_sender_results.documents_owned().remove(0); + + let price: Credits = document + .properties() + .get_integer("$price") + .expect("expected to get back price"); + + assert_eq!(dash_to_credits!(0.1), price); + + // At this point we want to have the receiver purchase the document at the next epoch + + fast_forward_to_block(&platform, 1_700_000_000, 1200, 42, 2, false); //next epoch + + document.set_revision(Some(3)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), 
+ card_document_type, + purchaser.id(), + dash_to_credits!(0.1), //same price as requested + &recipient_key, + 1, // 1 because he's never done anything + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 64611000); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 4080480); + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + Some(22704503) + ); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to have no documents, and the receiver to have 1 + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 1); + + let 
seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + // the seller should have received 0.1 and already had 0.1 minus the processing fee and storage fee + assert_eq!( + seller_balance, + dash_to_credits!(0.2) - original_creation_cost + 20014623 + ); + + let buyers_balance = platform + .drive + .fetch_identity_balance(purchaser.id().to_buffer(), None, platform_version) + .expect("expected to get purchaser balance") + .expect("expected that purchaser exists"); + + // the buyer paid 0.1, but also storage and processing fees + assert_eq!(buyers_balance, dash_to_credits!(0.9) - 68691480); + } + + #[test] + fn test_document_set_price_and_try_purchase_at_different_amount() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (purchaser, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(1.0)); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + assert_eq!(seller_balance, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + 
DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.5), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 1); + + // At this point we want to have the receiver purchase the document + + document.set_revision(Some(3)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), + card_document_type, + purchaser.id(), + dash_to_credits!(0.35), //different than requested price + &recipient_key, + 1, // 1 because he's never done anything + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + let result = processing_result.into_execution_results().remove(0); + + let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result else { + panic!("expected a paid consensus error"); + }; + assert_eq!(consensus_error.to_string(), 
"5rJccTdtJfg6AxSKyrptWUug3PWjveEitTTLqBn9wHdk document can not be purchased for 35000000000, it's sale price is 50000000000 (in credits)"); + } + + #[test] + fn test_document_set_price_and_purchase_from_ones_self() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.5)); + + let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + assert_eq!(seller_balance, dash_to_credits!(0.5)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = 
platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 1); + + // At this point we want to have the receiver purchase the document + + document.set_revision(Some(3)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), + card_document_type, + identity.id(), + dash_to_credits!(0.1), //same price as requested + &key, + 1, // 1 because 
he's never done anything + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + let result = processing_result.into_execution_results().remove(0); + + let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result else { + panic!("expected a paid consensus error"); + }; + assert_eq!(consensus_error.to_string(), "Document transition action on document type: card identity trying to purchase a document that is already owned by the purchaser is not supported"); + } + + #[test] + fn test_document_set_price_and_purchase_then_try_buy_back() { + // In this test we try to buy back a document after it has been sold + + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (purchaser, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(1.0)); + + 
let seller_balance = platform + .drive + .fetch_identity_balance(identity.id().to_buffer(), None, platform_version) + .expect("expected to get identity balance") + .expect("expected that identity exists"); + + assert_eq!(seller_balance, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + 
BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 1); + + // At this point we want to have the receiver purchase the document + + document.set_revision(Some(3)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), + card_document_type, + purchaser.id(), + dash_to_credits!(0.1), //same price as requested + &recipient_key, + 1, // 1 because he's never done anything + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + 
&BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.valid_count(), 1); + + // Let's verify some stuff + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", purchaser.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 1); + + // The sender document should have the desired price + + let mut document = query_receiver_results.documents_owned().remove(0); + + let price: Option = document + .properties() + .get_optional_integer("$price") + .expect("expected to get back price"); + + assert_eq!(price, None); + + assert_eq!(document.owner_id(), purchaser.id()); + + // At this point we want to have the sender to try to buy back the document + + 
document.set_revision(Some(4)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), + card_document_type, + identity.id(), + dash_to_credits!(0.1), //same price as old requested + &key, + 4, // 1 because he's never done anything + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + let result = processing_result.into_execution_results().remove(0); + + let StateTransitionExecutionResult::PaidConsensusError(consensus_error, _) = result else { + panic!("expected a paid consensus error"); + }; + assert_eq!( + consensus_error.to_string(), + "5rJccTdtJfg6AxSKyrptWUug3PWjveEitTTLqBn9wHdk document not for sale" + ); + } + + #[test] + fn test_document_set_price_and_purchase_with_enough_credits_to_buy_but_not_enough_to_pay_for_processing( + ) { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, 
signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + 
sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + 
.commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 2473880); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + // The sender document should have the desired price + + let mut document = query_sender_results.documents_owned().remove(0); + + let price: Credits = document + .properties() + .get_integer("$price") + .expect("expected to get back price"); + + assert_eq!(dash_to_credits!(0.1), price); + + // At this point we want to have the receiver purchase the document + + document.set_revision(Some(3)); + + let documents_batch_purchase_transition = + BatchTransition::new_document_purchase_transition_from_document( + document.clone(), + card_document_type, + receiver.id(), + dash_to_credits!(0.1), //same price as requested + &recipient_key, + 1, // 1 because he's never done anything + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the purchase"); + + let documents_batch_purchase_serialized_transition = documents_batch_purchase_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); 
+ + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_purchase_serialized_transition], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + // nothing can go through because the purchaser doesn't have enough balance + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 1); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 0); + } + + #[test] + fn test_document_set_price_on_not_owned_document() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_nft(TradeMode::DirectPurchase); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (other_identity, other_identity_signer, other_identity_key) = + setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let 
documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + document.set_revision(Some(2)); + + document.set_owner_id(other_identity.id()); // we do this to trick the system + + let documents_batch_update_price_transition = + BatchTransition::new_document_update_price_transition_from_document( + document.clone(), + card_document_type, + dash_to_credits!(0.1), + &other_identity_key, + 1, + 0, + &other_identity_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for the update price"); + + let documents_batch_transfer_serialized_transition = + documents_batch_update_price_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + 
platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 36200); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + // The sender document should not have the desired price + + let document = query_sender_results.documents().first().unwrap(); + + assert_eq!( + document + .properties() + .get_optional_integer::("$price") + .expect("expected None"), + None + ); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/replacement.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/replacement.rs new file mode 100644 index 00000000000..62bc1440bf6 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/replacement.rs @@ -0,0 +1,1931 @@ +use super::*; + +mod replacement_tests { + use super::*; + use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; + use dpp::identifier::Identifier; + use dpp::prelude::IdentityNonce; + use std::collections::BTreeMap; + + #[test] + fn test_document_replace_on_document_type_that_is_mutable() { + let platform_version = 
PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let documents_batch_update_transition = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition = documents_batch_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 1443820); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + fn perform_document_replace_on_profile_after_epoch_change( + 
original_name: &str, + new_names: Vec<(&str, StorageFlags)>, + ) { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("displayName", original_name.into()); + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + 
assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + for (i, (new_name, mut expected_flags)) in new_names.into_iter().enumerate() { + document.increment_revision().unwrap(); + document.set("displayName", new_name.into()); + + fast_forward_to_block( + &platform, + 500_000_000 + i as u64 * 1000, + 900 + i as u64, + 42, + 1 + i as u16, + true, + ); //less than a week + + let documents_batch_update_transition = + BatchTransition::new_document_replacement_transition_from_document( + document.clone(), + profile, + &key, + 3 + i as IdentityNonce, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition = documents_batch_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let platform_state = platform.state.load(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!( + processing_result.valid_count(), + 1, + "{:?}", + processing_result.execution_results() + ); + + let drive_query = DriveDocumentQuery::new_primary_key_single_item_query( + &dashpay, + profile, + document.id(), + ); + + let mut documents = platform + .drive + .query_documents_with_flags(drive_query, None, false, None, None) + .expect("expected to 
get back documents") + .documents_owned(); + + let (_first_document, storage_flags) = documents.remove(0); + + let storage_flags = storage_flags.expect("expected storage flags"); + + expected_flags.set_owner_id(identity.id().to_buffer()); + + assert_eq!(storage_flags, expected_flags); + } + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size() { + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![( + "Samuel", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 6)]), + Identifier::default().to_buffer(), + ), + )], + ); + } + + #[test] + fn test_document_replace_on_document_type_that_is_mutable_different_epoch_smaller_size() { + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![( + "S", + StorageFlags::SingleEpochOwned(0, Identifier::default().to_buffer()), + )], + ); + } + + #[test] + fn test_document_replace_on_document_type_that_is_mutable_different_epoch_same_size() { + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![( + "Max", + StorageFlags::SingleEpochOwned(0, Identifier::default().to_buffer()), + )], + ); + } + + #[test] + fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_bigger_size( + ) { + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![ + ( + "Samuel", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 6)]), + Identifier::default().to_buffer(), + ), + ), + ( + "SamuelW", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 6), (2, 4)]), + Identifier::default().to_buffer(), + ), + ), + ], + ); + } + + #[test] 
+ fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_bigger_size_by_3_bytes( + ) { + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![ + ( + "Samuel", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 6)]), + Identifier::default().to_buffer(), + ), + ), + ( + "SamuelWes", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 6), (2, 6)]), + Identifier::default().to_buffer(), + ), + ), + ], + ); + } + + #[test] + fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_smaller_size( + ) { + // In this case we start with the size Samuell Base epoch 0 epoch 1 added 7 bytes + // Then we try to update it to Sami Base epoch 2 + // Epoch 1 added 7 bytes is itself 3 bytes + // Sami is 3 bytes less than Samuell + // First iteration will say we should remove 6 bytes + // We need to start by calculating the cost of the original storage flags, in this case 5 bytes + // Then we need to calculate the cost of the new storage flags, in this case 2 bytes + // We should do the difference, then apply that difference in the combination function + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![ + ( + "Samuell", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 7)]), + Identifier::default().to_buffer(), + ), + ), + ( + "Sami", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 4)]), + Identifier::default().to_buffer(), + ), + ), + ], + ); + } + + #[test] + fn test_document_replace_on_document_type_that_is_mutable_different_epoch_bigger_size_then_back_to_original( + ) { + perform_document_replace_on_profile_after_epoch_change( + "Sam", + vec![ + ( + "Samuel", + StorageFlags::MultiEpochOwned( + 0, + BTreeMap::from([(1, 6)]), + Identifier::default().to_buffer(), + ), + ), + ( + "Sam", + StorageFlags::SingleEpochOwned(0, Identifier::default().to_buffer()), + ), + ], + ); + } + + #[test] + fn 
test_document_replace_on_document_type_that_is_not_mutable() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(437); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (other_identity, ..) = setup_identity(&mut platform, 495, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let contact_request_document_type = dashpay_contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type"); + + assert!(!contact_request_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = contact_request_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set( + "toUserId", + Value::Identifier(other_identity.id().to_buffer()), + ); + document.set("recipientKeyIndex", Value::U32(1)); + document.set("senderKeyIndex", Value::U32(1)); + document.set("accountReference", Value::U32(0)); + + let mut altered_document = document.clone(); + + altered_document.set_revision(Some(1)); + altered_document.set("senderKeyIndex", Value::U32(2)); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + contact_request_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + 
.serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let documents_batch_update_transition = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + contact_request_document_type, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition = documents_batch_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 41880); + } + + #[test] + fn test_document_replace_on_document_type_that_is_not_mutable_but_is_transferable() { + let platform_version = PlatformVersion::latest(); + 
let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_transfer_only(Transferable::Always); + + let mut rng = StdRng::seed_from_u64(435); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, _, _) = setup_identity(&mut platform, 452, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit 
transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + document.set("attack", 6.into()); + document.set("defense", 0.into()); + + let documents_batch_transfer_transition = + BatchTransition::new_document_replacement_transition_from_document( + document, + card_document_type, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for transfer"); + + let documents_batch_transfer_serialized_transition = documents_batch_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + } + + #[test] + fn test_document_replace_that_does_not_yet_exist() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + 
DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let documents_batch_update_transition = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition = documents_batch_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 516040); + } + + #[test] + fn test_double_document_replace() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); 
//next epoch + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let mut altered_document_2 = altered_document.clone(); + + altered_document_2.increment_revision().unwrap(); + altered_document_2.set("displayName", "Ody".into()); + altered_document_2.set("avatarUrl", "http://test.com/drapes.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + 
platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let receiver_documents_sql_string = "select * from profile".to_string(); + + let query_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &dashpay, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); + + let documents_batch_update_transition_1 = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition_1 = documents_batch_update_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_update_transition_2 = + BatchTransition::new_document_replacement_transition_from_document( + altered_document_2, + profile, + &key, + 4, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents 
batch transition"); + + let documents_batch_update_serialized_transition_2 = documents_batch_update_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![ + documents_batch_update_serialized_transition_1.clone(), + documents_batch_update_serialized_transition_2.clone(), + ], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 2); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/drap[...(26)] displayName:string Ody publicMessage:string 8XG7KBGNvm2 "); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn 
test_double_document_replace_different_height_same_epoch() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let mut altered_document_2 = altered_document.clone(); + + altered_document_2.increment_revision().unwrap(); + altered_document_2.set("displayName", "Ody".into()); + altered_document_2.set("avatarUrl", "http://test.com/drapes.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = 
documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let receiver_documents_sql_string = "select * from profile".to_string(); + + let query_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &dashpay, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); + + fast_forward_to_block(&platform, 1_400_000_000, 901, 43, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let documents_batch_update_transition_1 = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect 
to create documents batch transition"); + + let documents_batch_update_serialized_transition_1 = documents_batch_update_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_update_transition_2 = + BatchTransition::new_document_replacement_transition_from_document( + altered_document_2, + profile, + &key, + 4, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition_2 = documents_batch_update_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition_1.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-17 04:53:20 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/cat.[...(23)] displayName:string 
Samuel publicMessage:string 8XG7KBGNvm2 "); + + fast_forward_to_block(&platform, 1_600_000_000, 902, 44, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition_2.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-19 12:26:40 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/drap[...(26)] displayName:string Ody publicMessage:string 8XG7KBGNvm2 "); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_double_document_replace_no_change_different_height_same_epoch() { + let platform_version = 
PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + + let mut altered_document_2 = altered_document.clone(); + + altered_document_2.increment_revision().unwrap(); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + 
&platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let receiver_documents_sql_string = "select * from profile".to_string(); + + let query_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &dashpay, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); + + fast_forward_to_block(&platform, 1_400_000_000, 901, 43, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let documents_batch_update_transition_1 = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition_1 = documents_batch_update_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_update_transition_2 = + 
BatchTransition::new_document_replacement_transition_from_document( + altered_document_2, + profile, + &key, + 4, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition_2 = documents_batch_update_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition_1.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-17 04:53:20 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); + + fast_forward_to_block(&platform, 1_600_000_000, 902, 44, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result 
= platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition_2.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-19 12:26:40 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_double_document_replace_different_height_different_epoch() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(433); + + fast_forward_to_block(&platform, 1_200_000_000, 900, 42, 1, 
false); //next epoch + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let dashpay = platform.drive.cache.system_data_contracts.load_dashpay(); + let dashpay_contract = dashpay.clone(); + + let profile = dashpay_contract + .document_type_for_name("profile") + .expect("expected a profile document type"); + + assert!(profile.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = profile + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("avatarUrl", "http://test.com/bob.jpg".into()); + + let mut altered_document = document.clone(); + + altered_document.increment_revision().unwrap(); + altered_document.set("displayName", "Samuel".into()); + altered_document.set("avatarUrl", "http://test.com/cat.jpg".into()); + + let mut altered_document_2 = altered_document.clone(); + + altered_document_2.increment_revision().unwrap(); + altered_document_2.set("displayName", "Ody".into()); + altered_document_2.set("avatarUrl", "http://test.com/drapes.jpg".into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document, + profile, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + 
platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let receiver_documents_sql_string = "select * from profile".to_string(); + + let query_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &dashpay, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-14 21:20:00 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/bob.[...(23)] displayName:string QBwBNNXXYCngB0er publicMessage:string 8XG7KBGNvm2 "); + + fast_forward_to_block(&platform, 1_400_000_000, 901, 43, 1, false); //next epoch + + let platform_state = platform.state.load(); + + let documents_batch_update_transition_1 = + BatchTransition::new_document_replacement_transition_from_document( + altered_document, + profile, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition_1 = documents_batch_update_transition_1 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let documents_batch_update_transition_2 = + BatchTransition::new_document_replacement_transition_from_document( + 
altered_document_2, + profile, + &key, + 4, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_update_serialized_transition_2 = documents_batch_update_transition_2 + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_update_serialized_transition_1.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-17 04:53:20 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/cat.[...(23)] displayName:string Samuel publicMessage:string 8XG7KBGNvm2 "); + + fast_forward_to_block(&platform, 1_600_000_000, 905, 44, 2, true); //next epoch + + let platform_state = platform.state.load(); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![documents_batch_update_serialized_transition_2.clone()], + &platform_state, + platform_state.last_block_info(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + let query_sender_results = platform + .drive + .query_documents(query_documents.clone(), None, false, None, None) + .expect("expected query result"); + + let document = query_sender_results + .documents() + .first() + .expect("expected a document"); + + assert_eq!(document.to_string(), "v0 : id:GcviwUsEr9Ji4rCrnnsgmVAghNaVPDumsfcagvBbBy45 owner_id:CisQdz2ej7EwWv8JbetSXBNsV4xsf8QsSS8tqp4tEf7V created_at:1970-01-14 21:20:00 updated_at:1970-01-19 12:26:40 avatarFingerprint:bytes d7b0e2b357c10312 avatarHash:bytes32 YonaRoE0hMgat53AYt5LTlQlIkKLReGpB7xNAqJ5HM8= avatarUrl:string http://test.com/drap[...(26)] displayName:string Ody publicMessage:string 8XG7KBGNvm2 "); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/transfer.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/transfer.rs new file mode 100644 index 00000000000..f45b6c25412 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/document/transfer.rs @@ -0,0 
+1,1019 @@ +use super::*; + +mod transfer_tests { + use super::*; + + #[test] + fn test_document_transfer_on_document_type_that_is_transferable_that_has_no_owner_indices() { + let mut platform = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure(); + + let card_game_path = "tests/supporting_files/contract/crypto-card-game/crypto-card-game-all-transferable-no-owner-indexes.json"; + + let platform_state = platform.state.load(); + let platform_version = platform_state + .current_platform_version() + .expect("expected to get current platform version"); + + // let's construct the grovedb structure for the card game data contract + let contract = json_document_to_contract(card_game_path, true, platform_version) + .expect("expected to get data contract"); + platform + .drive + .apply_contract( + &contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, _, _) = setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), 
+ card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + document.set_revision(Some(2)); + + let documents_batch_transfer_transition = + BatchTransition::new_document_transfer_transition_from_document( + document, + card_document_type, + receiver.id(), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for transfer"); + + let documents_batch_transfer_serialized_transition = documents_batch_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + 
assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 0); // There is no storage fee, as there are no indexes that will change + + assert_eq!(processing_result.aggregated_fees().processing_fee, 1985420); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_document_transfer_on_document_type_that_is_transferable() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_transfer_only(Transferable::Always); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, _, _) = setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + 
card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + 
assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_transfer_transition = + BatchTransition::new_document_transfer_transition_from_document( + document, + card_document_type, + receiver.id(), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for transfer"); + + let documents_batch_transfer_serialized_transition = documents_batch_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().storage_fee, 37341000); // 1383 bytes added + + // todo: we should expect these numbers to be closer + + assert_eq!( + processing_result + .aggregated_fees() + .fee_refunds + .calculate_refunds_amount_for_identity(identity.id()), + Some(14992395) + ); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 3369260); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected 
query result"); + + // We expect the sender to have no documents, and the receiver to have 1 + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 1); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } + + #[test] + fn test_document_transfer_on_document_type_that_is_not_transferable() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_transfer_only(Transferable::Never); + + let mut rng = StdRng::seed_from_u64(435); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, _, _) = setup_identity(&mut platform, 452, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + 
let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_transfer_transition = + 
BatchTransition::new_document_transfer_transition_from_document( + document, + card_document_type, + receiver.id(), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for transfer"); + + let documents_batch_transfer_serialized_transition = documents_batch_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 445700); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to still have their document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + } + + #[test] + fn test_document_transfer_that_does_not_yet_exist() { + let platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + 
.with_crypto_card_game_transfer_only(Transferable::Never); + + let mut rng = StdRng::seed_from_u64(435); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, _, _) = setup_identity(&mut platform, 452, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 0 documents, and the receiver to also have none + 
assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_transfer_transition = + BatchTransition::new_document_transfer_transition_from_document( + document, + card_document_type, + receiver.id(), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for transfer"); + + let documents_batch_transfer_serialized_transition = documents_batch_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 36200); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to still have no document, and the receiver to have none as well + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 0); + } + + #[test] + fn test_document_delete_after_transfer() { + let 
platform_version = PlatformVersion::latest(); + let (mut platform, contract) = TestPlatformBuilder::new() + .build_with_mock_rpc() + .set_initial_state_structure() + .with_crypto_card_game_transfer_only(Transferable::Always); + + let mut rng = StdRng::seed_from_u64(433); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = setup_identity(&mut platform, 958, dash_to_credits!(0.1)); + + let (receiver, recipient_signer, recipient_key) = + setup_identity(&mut platform, 450, dash_to_credits!(0.1)); + + let card_document_type = contract + .document_type_for_name("card") + .expect("expected a profile document type"); + + assert!(!card_document_type.documents_mutable()); + + let entropy = Bytes32::random_with_rng(&mut rng); + + let mut document = card_document_type + .random_document_with_identifier_and_entropy( + &mut rng, + identity.id(), + entropy, + DocumentFieldFillType::DoNotFillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + platform_version, + ) + .expect("expected a random document"); + + document.set("attack", 4.into()); + document.set("defense", 7.into()); + + let documents_batch_create_transition = + BatchTransition::new_document_creation_transition_from_document( + document.clone(), + card_document_type, + entropy.0, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + 
assert_eq!(processing_result.valid_count(), 1); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let sender_documents_sql_string = + format!("select * from card where $ownerId == '{}'", identity.id()); + + let query_sender_identity_documents = DriveDocumentQuery::from_sql_expr( + sender_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let receiver_documents_sql_string = + format!("select * from card where $ownerId == '{}'", receiver.id()); + + let query_receiver_identity_documents = DriveDocumentQuery::from_sql_expr( + receiver_documents_sql_string.as_str(), + &contract, + Some(&platform.config.drive), + ) + .expect("expected document query"); + + let query_sender_results = platform + .drive + .query_documents( + query_sender_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents( + query_receiver_identity_documents.clone(), + None, + false, + None, + None, + ) + .expect("expected query result"); + + // We expect the sender to have 1 document, and the receiver to have none + assert_eq!(query_sender_results.documents().len(), 1); + + assert_eq!(query_receiver_results.documents().len(), 0); + + document.set_revision(Some(2)); + + let documents_batch_transfer_transition = + BatchTransition::new_document_transfer_transition_from_document( + document.clone(), + card_document_type, + receiver.id(), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition for transfer"); + + let documents_batch_transfer_serialized_transition = documents_batch_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let 
processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default_with_time(50000000), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 0); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 1); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 3730120); + + let query_sender_results = platform + .drive + .query_documents(query_sender_identity_documents, None, false, None, None) + .expect("expected query result"); + + let query_receiver_results = platform + .drive + .query_documents(query_receiver_identity_documents, None, false, None, None) + .expect("expected query result"); + + // We expect the sender to have no documents, and the receiver to have 1 + assert_eq!(query_sender_results.documents().len(), 0); + + assert_eq!(query_receiver_results.documents().len(), 1); + + // Now let's try to delete the transferred document + + document.set_owner_id(receiver.id()); + + let documents_batch_deletion_transition = + BatchTransition::new_document_deletion_transition_from_document( + document, + card_document_type, + &recipient_key, + 2, + 0, + &recipient_signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_deletion_serialized_transition = documents_batch_deletion_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![documents_batch_deletion_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + assert_eq!(processing_result.invalid_paid_count(), 1); + + assert_eq!(processing_result.invalid_unpaid_count(), 0); + + assert_eq!(processing_result.valid_count(), 0); + + assert_eq!(processing_result.aggregated_fees().processing_fee, 571240); + + let issues = platform + .drive + .grove + .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) + .expect("expected to have no issues"); + + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/mod.rs new file mode 100644 index 00000000000..50cae04a67b --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/mod.rs @@ -0,0 +1,44 @@ +mod document; +mod token; + +use crate::platform_types::platform_state::v0::PlatformStateV0Methods; + +use dpp::data_contract::accessors::v0::DataContractV0Getters; +use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; +use dpp::data_contract::document_type::random_document::{ + CreateRandomDocument, DocumentFieldFillSize, DocumentFieldFillType, +}; + +use dpp::document::document_methods::DocumentMethodsV0; + +use dpp::document::{DocumentV0Getters, DocumentV0Setters}; + +use dpp::identity::accessors::IdentityGettersV0; + +use dpp::platform_value::btreemap_extensions::BTreeValueMapHelper; + +use 
dpp::serialization::PlatformSerializable; +use dpp::state_transition::batch_transition::methods::v0::DocumentsBatchTransitionMethodsV0; + +use drive::drive::document::query::QueryDocumentsOutcomeV0Methods; +use drive::drive::document::query::QueryDocumentsWithFlagsOutcomeV0Methods; + +use crate::execution::validation::state_transition::tests::setup_identity; +use crate::platform_types::state_transitions_processing_result::StateTransitionExecutionResult; +use crate::test::helpers::setup::TestPlatformBuilder; +use assert_matches::assert_matches; +use dpp::block::block_info::BlockInfo; +use dpp::dash_to_credits; +use dpp::document::transfer::Transferable; +use dpp::fee::fee_result::BalanceChange; +use dpp::fee::Credits; +use dpp::nft::TradeMode; +use dpp::platform_value::{Bytes32, Value}; +use dpp::state_transition::batch_transition::BatchTransition; +use dpp::tests::json_document::json_document_to_contract; +use dpp::version::PlatformVersion; +use drive::query::DriveDocumentQuery; +use drive::util::storage_flags::StorageFlags; +use rand::prelude::StdRng; +use rand::Rng; +use rand::SeedableRng; diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/distribution/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/distribution/mod.rs new file mode 100644 index 00000000000..8401d81d89e --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/distribution/mod.rs @@ -0,0 +1,3 @@ +mod perpetual; + +use super::*; diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/distribution/perpetual.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/distribution/perpetual.rs new file mode 100644 index 00000000000..251fd6929b1 --- /dev/null +++ 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/distribution/perpetual.rs @@ -0,0 +1,99 @@ +use super::*; +mod perpetual_distribution { + use dpp::data_contract::associated_token::token_distribution_key::TokenDistributionType; + use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_function::DistributionFunction; + use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; + use dpp::data_contract::associated_token::token_perpetual_distribution::TokenPerpetualDistribution; + use dpp::data_contract::associated_token::token_perpetual_distribution::v0::TokenPerpetualDistributionV0; + use crate::test::helpers::fast_forward_to_block::fast_forward_to_block; + use super::*; + #[test] + #[ignore] + fn test_token_perpetual_distribution_block_claim_linear() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_perpetual_distribution(Some(TokenPerpetualDistribution::V0( + TokenPerpetualDistributionV0 { + distribution_type: RewardDistributionType::BlockBasedDistribution { + interval: 10, + function: DistributionFunction::FixedAmount { amount: 50 }, + start: None, + end: None, + }, + distribution_recipient: Default::default(), + }, + ))); + }), + None, + platform_version, + ); + + fast_forward_to_block(&platform, 10_200_000_000, 40, 42, 1, false); //25 years later + + let claim_transition = 
BatchTransition::new_token_claim_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenDistributionType::Perpetual, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let claim_serialized_transition = claim_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![claim_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/mod.rs new file mode 100644 index 00000000000..ce0ca0b86c8 --- /dev/null +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/tests/token/mod.rs @@ -0,0 +1,6092 @@ +mod distribution; +use super::*; +use crate::execution::validation::state_transition::tests::create_token_contract_with_owner_identity; +use crate::platform_types::state_transitions_processing_result::StateTransitionExecutionResult; +use crate::test::helpers::setup::TestPlatformBuilder; +use dpp::block::block_info::BlockInfo; +use dpp::consensus::basic::BasicError; +use dpp::consensus::state::state_error::StateError; +use dpp::consensus::ConsensusError; +use dpp::dash_to_credits; +use 
dpp::data_contract::accessors::v1::DataContractV1Getters; +use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Setters; +use dpp::data_contract::associated_token::token_configuration::TokenConfiguration; +use dpp::data_contract::associated_token::token_configuration_convention::v0::TokenConfigurationConventionV0; +use dpp::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; +use dpp::data_contract::associated_token::token_configuration_localization::TokenConfigurationLocalization; +use dpp::data_contract::associated_token::token_distribution_rules::accessors::v0::TokenDistributionRulesV0Setters; +use dpp::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; +use dpp::data_contract::change_control_rules::v0::ChangeControlRulesV0; +use dpp::data_contract::change_control_rules::ChangeControlRules; +use dpp::data_contract::group::v0::GroupV0; +use dpp::data_contract::group::Group; +use dpp::group::GroupStateTransitionInfo; +use dpp::group::GroupStateTransitionInfoStatus; +use dpp::identifier::Identifier; +use dpp::state_transition::batch_transition::methods::v1::DocumentsBatchTransitionMethodsV1; +use dpp::state_transition::batch_transition::BatchTransition; +use dpp::state_transition::batch_transition::TokenConfigUpdateTransition; +use rand::prelude::StdRng; + +mod token_tests { + use super::*; + + mod token_mint_tests { + use super::*; + + mod token_mint_tests_normal_scenarios { + use super::*; + + #[test] + fn test_token_mint_by_owner_allowed_sending_to_self() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let 
(identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(101337)); + } + + #[test] + fn test_token_mint_by_owner_can_not_mint_past_max_supply() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, 
signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply(Some(1000000)); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 2000000, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::TokenMintPastMaxSupplyError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_mint_by_owner_allowed_sending_to_other() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + 
.with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (receiver, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(receiver.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + receiver.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(1337)); + } + + #[test] + fn test_token_mint_sending_to_non_existing_identity_causes_error() { + let 
platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let receiver = Identifier::random_with_rng(&mut rng); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(receiver), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::RecipientIdentityDoesNotExistError( + _ + )), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + receiver.to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + 
assert_eq!(token_balance, None); + } + + #[test] + fn test_token_mint_by_owner_no_destination_causes_error() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + None, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::BasicError( + BasicError::DestinationIdentityForTokenMintingNotSetError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + } + + mod token_mint_tests_no_recipient_minting { + use super::*; + + #[test] + fn test_token_mint_by_owned_id_allowed_sending_to_self() { 
+ let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_minting_allow_choosing_destination(false); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::BasicError( + BasicError::ChoosingTokenMintRecipientNotAllowedError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + 
token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_mint_by_owned_id_allowed_sending_to_other() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (receiver, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_minting_allow_choosing_destination(false); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(receiver.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + 
[StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::BasicError( + BasicError::ChoosingTokenMintRecipientNotAllowedError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + receiver.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn test_token_mint_by_owned_id_no_destination_causes_error() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_minting_allow_choosing_destination(false); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + None, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + 
&platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::BasicError( + BasicError::DestinationIdentityForTokenMintingNotSetError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + } + + mod token_mint_tests_contract_has_recipient { + use super::*; + + #[test] + fn test_token_mint_by_owned_id_allowed_sending_to_self() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_minting_allow_choosing_destination(false); + token_configuration + .distribution_rules_mut() + .set_new_tokens_destination_identity(Some(identity.id())); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = 
platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::BasicError( + BasicError::ChoosingTokenMintRecipientNotAllowedError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_mint_by_owned_id_allowed_sending_to_other() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (receiver, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_minting_allow_choosing_destination(false); + token_configuration + .distribution_rules_mut() + .set_new_tokens_destination_identity(Some(identity.id())); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = 
BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(receiver.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::BasicError( + BasicError::ChoosingTokenMintRecipientNotAllowedError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + receiver.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn test_token_mint_by_owned_id_no_set_destination_should_use_contracts() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + 
identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration + .distribution_rules_mut() + .set_minting_allow_choosing_destination(false); + token_configuration + .distribution_rules_mut() + .set_new_tokens_destination_identity(Some(identity.id())); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + None, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(101337)); + } + } + + mod token_mint_tests_authorization_scenarios { + use super::*; + use dpp::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; + use dpp::data_contract::change_control_rules::v0::ChangeControlRulesV0; + use dpp::data_contract::change_control_rules::ChangeControlRules; + 
use dpp::data_contract::group::v0::GroupV0; + use dpp::data_contract::group::Group; + use dpp::group::{GroupStateTransitionInfo, GroupStateTransitionInfoStatus}; + use dpp::state_transition::batch_transition::TokenMintTransition; + + #[test] + fn test_token_mint_by_owner_sending_to_self_minting_not_allowed() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::NoOne, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], 
+ &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_mint_by_owner_sending_to_self_minting_only_allowed_by_group() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 5), (identity_2.id(), 5)].into(), + required_power: 
10, + }), + )] + .into(), + ), + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_mint_by_owner_sending_to_self_minting_only_allowed_by_group_enough_member_power( + ) { + // We are using a group, but our member alone has enough power in the group to do the action + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = 
platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 5), (identity_2.id(), 1)].into(), + required_power: 5, + }), + )] + .into(), + ), + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = + documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + 
.commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(101337)); + } + + #[test] + fn test_token_mint_by_owner_requires_group_other_member() { + // We are using a group, and two members need to sign for the event to happen + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer2, key2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (identity_2.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + 
platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + // Now we need to get the second identity to also sign it + let action_id = TokenMintTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key2, + 2, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state 
transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(101337)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn test_token_mint_by_owner_requires_group_resubmitting_causes_error() { + // We are using a group, and two members need to sign for the event to happen + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + 
token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (identity_2.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + // Now we need to get the 
second identity to also sign it, but we are going to resubmit with first + // This will create an error + let action_id = TokenMintTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::GroupActionAlreadySignedByIdentityError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + 
identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn test_token_mint_by_owner_requires_group_other_member_resubmitting_causes_error() { + // We are using a group, and two members need to sign for the event to happen + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer2, key2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_3, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [ + (identity.id(), 1), + (identity_2.id(), 1), + (identity_3.id(), 1), + ] + .into(), + required_power: 3, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, 
+ ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + // Now we need to get the second identity to also sign it + let action_id = TokenMintTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key2, + 2, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = 
platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + + // Now we need to get the second identity to sign it again to cause the error + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key2, + 3, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::GroupActionAlreadySignedByIdentityError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn test_token_mint_by_owner_requires_group_other_member_submitting_after_completion_causes_error( + ) { + // We are using a group, and two members need to sign for the event to happen + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer2, key2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_3, signer3, key3) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut 
TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [ + (identity.id(), 1), + (identity_2.id(), 1), + (identity_3.id(), 1), + ] + .into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + 
assert_eq!(token_balance, Some(100000)); + + // Now we need to get the second identity to also sign it + let action_id = TokenMintTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key2, + 2, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(101337)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); 
+ assert_eq!(token_balance, None); + + // Now we need to get the second identity to sign it again to cause the error + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_3.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key3, + 2, + 0, + &signer3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::GroupActionAlreadyCompletedError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(101337)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + 
#[test] + fn test_token_mint_by_owner_requires_group_proposer_not_in_group() { + // We are using a group, and two members need to sign for the event to happen + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_3, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity_3.id(), 1), (identity_2.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state 
transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::IdentityNotMemberOfGroupError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_mint_by_owner_requires_group_other_signer_not_part_of_group() { + // We are using a group, and two members need to sign for the event to happen + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer2, key2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_3, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: 
AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (identity_3.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + // Now we need to get the second identity to also sign it + let action_id = TokenMintTransition::calculate_action_id_with_fields( + 
token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key2, + 2, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::IdentityNotMemberOfGroupError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn 
test_token_mint_other_signer_going_first_causes_error() { + // We are using a group, and the second member gets a bit hasty and signs first + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer2, key2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (identity_2.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + // The second identity to also sign it + let action_id = TokenMintTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + let confirm_token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key2, + 2, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + 
.expect("expect to create documents batch transition"); + + let confirm_token_mint_serialized_transition = confirm_token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![confirm_token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::GroupActionDoesNotExistError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + } + } + + mod token_burn_tests { + use super::*; + + #[test] + fn test_token_burn() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + 
None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_burn_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 100000 - 1337; + assert_eq!(token_balance, Some(expected_amount)); + } + + #[test] + fn test_token_burn_trying_to_burn_more_than_we_have() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + 
identity.id(), + None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_burn_transition( + token_id, + identity.id(), + contract.id(), + 0, + 200000, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::IdentityDoesNotHaveEnoughTokenBalanceError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); // nothing was burned + } + + #[test] + fn test_token_burn_gives_error_if_trying_to_burn_from_not_allowed_identity() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (contract_owner_identity, _, _) = + setup_identity(&mut platform, rng.gen(), 
dash_to_credits!(0.5)); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + contract_owner_identity.id(), + None::, + None, + platform_version, + ); + + let documents_batch_create_transition = BatchTransition::new_token_burn_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let documents_batch_create_serialized_transition = documents_batch_create_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![documents_batch_create_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + contract_owner_identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + } + + mod 
token_transfer_tests { + use dpp::data_contract::change_control_rules::authorized_action_takers::AuthorizedActionTakers; + use dpp::data_contract::change_control_rules::ChangeControlRules; + use dpp::data_contract::change_control_rules::v0::ChangeControlRulesV0; + use dpp::data_contract::group::Group; + use dpp::state_transition::batch_transition::TokenMintTransition; + use dpp::data_contract::group::v0::GroupV0; + use dpp::group::{GroupStateTransitionInfo, GroupStateTransitionInfoStatus}; + use dpp::identity::SecurityLevel; + use dpp::state_transition::batch_transition::accessors::DocumentsBatchTransitionAccessorsV0; + use dpp::state_transition::batch_transition::batched_transition::token_transition::TokenTransition; + use dpp::state_transition::StateTransition; + use dpp::state_transition::batch_transition::batched_transition::BatchedTransitionMutRef; + use dpp::state_transition::batch_transition::token_base_transition::token_base_transition_accessors::TokenBaseTransitionAccessors; + use dpp::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; + use super::*; + + #[test] + fn test_token_transfer() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (recipient, _, _) = setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let token_transfer_transition = BatchTransition::new_token_transfer_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + recipient.id(), + None, + None, + None, + &key, + 2, + 0, 
+ &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_transfer_serialized_transition = token_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 100000 - 1337; + assert_eq!(token_balance, Some(expected_amount)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 1337; + assert_eq!(token_balance, Some(expected_amount)); + } + + #[test] + fn test_token_transfer_to_ourself_should_fail() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = 
create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let token_transfer_transition = BatchTransition::new_token_transfer_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + identity.id(), + None, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_transfer_serialized_transition = token_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::UnpaidConsensusError( + ConsensusError::BasicError(BasicError::TokenTransferToOurselfError(_)) + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, Some(100000)); + } + + #[test] + fn test_token_transfer_trying_to_send_more_than_we_have() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); 
+ + let (recipient, _, _) = setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + None::, + None, + platform_version, + ); + + let token_transfer_transition = BatchTransition::new_token_transfer_transition( + token_id, + identity.id(), + contract.id(), + 0, + 200000, + recipient.id(), + None, + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_transfer_serialized_transition = token_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::IdentityDoesNotHaveEnoughTokenBalanceError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 100000; + assert_eq!(token_balance, Some(expected_amount)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + + #[test] + fn 
test_token_transfer_adding_group_info_causes_error() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (recipient, _, _) = setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + // let's start by creating a real action + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_manual_minting_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (recipient.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let token_mint_transition = BatchTransition::new_token_mint_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + Some(identity.id()), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_mint_serialized_transition = token_mint_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![token_mint_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let action_id = TokenMintTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + 1337, + ); + + let mut token_transfer_transition = BatchTransition::new_token_transfer_transition( + token_id, + identity.id(), + contract.id(), + 0, + 200000, + recipient.id(), + None, + None, + None, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + // here we add fake info + match &mut token_transfer_transition { + StateTransition::Batch(batch) => { + let first_transition = batch + .first_transition_mut() + .expect("expected_first_transition"); + match first_transition { + BatchedTransitionMutRef::Token(token) => match token { + TokenTransition::Transfer(transfer) => transfer + .base_mut() + .set_using_group_info(Some(GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: true, + })), + _ => {} + }, + _ => {} + } + } + _ => {} + } + + token_transfer_transition + .sign_external(&key, &signer, Some(|_, _| Ok(SecurityLevel::HIGH))) + .expect("expected to resign transaction"); + + let token_transfer_serialized_transition = token_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_serialized_transition.clone()], + &platform_state, + 
&BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::UnpaidConsensusError( + ConsensusError::BasicError(BasicError::GroupActionNotAllowedOnTransitionError( + _ + )) + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 100000; + assert_eq!(token_balance, Some(expected_amount)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + assert_eq!(token_balance, None); + } + } + + mod token_freeze_tests { + use super::*; + use dpp::tokens::info::v0::IdentityTokenInfoV0Accessors; + + #[test] + fn test_token_freeze() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_freeze_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: 
AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let freeze_transition = BatchTransition::new_token_freeze_transition( + token_id, + identity.id(), + contract.id(), + 0, + identity_2.id(), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let freeze_serialized_transition = freeze_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![freeze_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_frozen = platform + .drive + .fetch_identity_token_info( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token info") + .map(|info| info.frozen()); + assert_eq!(token_frozen, Some(true)); + } + + #[test] + fn test_token_freeze_and_unfreeze() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), 
dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_freeze_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + token_configuration.set_unfreeze_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let freeze_transition = BatchTransition::new_token_freeze_transition( + token_id, + identity.id(), + contract.id(), + 0, + identity_2.id(), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let freeze_serialized_transition = freeze_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![freeze_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + 
[StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_frozen = platform + .drive + .fetch_identity_token_info( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token info") + .map(|info| info.frozen()); + assert_eq!(token_frozen, Some(true)); + + let unfreeze_transition = BatchTransition::new_token_unfreeze_transition( + token_id, + identity.id(), + contract.id(), + 0, + identity_2.id(), + None, + None, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let unfreeze_serialized_transition = unfreeze_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![unfreeze_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_frozen = platform + .drive + .fetch_identity_token_info( + token_id.to_buffer(), + identity_2.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token info") + .map(|info| info.frozen()); + assert_eq!(token_frozen, Some(false)); + } + + #[test] + fn test_token_frozen_receive_balance_allowed_sending_not_allowed_till_unfrozen() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + 
.with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (recipient, signer2, key2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_freeze_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + token_configuration.set_unfreeze_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let freeze_transition = BatchTransition::new_token_freeze_transition( + token_id, + identity.id(), + contract.id(), + 0, + recipient.id(), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let freeze_serialized_transition = freeze_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![freeze_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_frozen = platform + .drive + .fetch_identity_token_info( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token info") + .map(|info| info.frozen()); + assert_eq!(token_frozen, Some(true)); + + let token_transfer_transition = BatchTransition::new_token_transfer_transition( + token_id, + identity.id(), + contract.id(), + 0, + 1337, + recipient.id(), + None, + None, + None, + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_transfer_serialized_transition = token_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + 
.expect("expected to fetch token balance"); + let expected_amount = 100000 - 1337; + assert_eq!(token_balance, Some(expected_amount)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 1337; + assert_eq!(token_balance, Some(expected_amount)); + + //now let's try sending our balance + + let token_transfer_back_transition = BatchTransition::new_token_transfer_transition( + token_id, + recipient.id(), + contract.id(), + 0, + 300, + identity.id(), + None, + None, + None, + &key2, + 2, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_transfer_back_serialized_transition = token_transfer_back_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_back_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::IdentityTokenAccountFrozenError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + // We expect no change + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 100000 - 1337; + assert_eq!(token_balance, Some(expected_amount)); + + let 
token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 1337; + assert_eq!(token_balance, Some(expected_amount)); + + let unfreeze_transition = BatchTransition::new_token_unfreeze_transition( + token_id, + identity.id(), + contract.id(), + 0, + recipient.id(), + None, + None, + &key, + 4, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let unfreeze_serialized_transition = unfreeze_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![unfreeze_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_frozen = platform + .drive + .fetch_identity_token_info( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token info") + .map(|info| info.frozen()); + assert_eq!(token_frozen, Some(false)); + + let token_transfer_transition = BatchTransition::new_token_transfer_transition( + token_id, + recipient.id(), + contract.id(), + 0, + 300, + identity.id(), + None, + None, + None, + &key2, + 3, + 0, + &signer2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let token_transfer_serialized_transition = 
token_transfer_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![token_transfer_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + identity.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 100000 - 1337 + 300; + assert_eq!(token_balance, Some(expected_amount)); + + let token_balance = platform + .drive + .fetch_identity_token_balance( + token_id.to_buffer(), + recipient.id().to_buffer(), + None, + platform_version, + ) + .expect("expected to fetch token balance"); + let expected_amount = 1337 - 300; + assert_eq!(token_balance, Some(expected_amount)); + } + } + + mod token_config_update_tests { + use super::*; + use dpp::data_contract::accessors::v1::DataContractV1Getters; + use dpp::data_contract::associated_token::token_configuration_convention::TokenConfigurationConvention; + use dpp::data_contract::associated_token::token_configuration_item::TokenConfigurationChangeItem; + + mod non_group { + use super::*; + #[test] + fn test_token_config_update_by_owner_changing_total_max_supply() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = 
StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupply(Some(1000000)), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let contract = platform + .drive + .fetch_contract( + 
contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!(updated_token_config.max_supply(), Some(1000000)); + } + + #[test] + fn test_token_config_update_by_owner_changing_total_max_supply_to_less_than_current_supply( + ) { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupply(Some(1000)), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = 
platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::TokenSettingMaxSupplyToLessThanCurrentSupplyError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!(updated_token_config.max_supply(), None); + } + + #[test] + fn test_token_config_update_by_owner_change_admin_to_another_identity() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: 
AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::ContractOwner, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupplyControlGroup( + AuthorizedActionTakers::Identity(identity_2.id()), + ), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupply(Some(1000000)), + None, + None, + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + 
.expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!(updated_token_config.max_supply(), Some(1000000)); + } + + #[test] + fn test_token_config_update_by_owner_change_admin_to_a_non_existent_identity_error() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let identity_2_id = Identifier::random_with_rng(&mut rng); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + 
admin_action_takers: AuthorizedActionTakers::ContractOwner, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupplyControlGroup( + AuthorizedActionTakers::Identity(identity_2_id), + ), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::NewAuthorizedActionTakerIdentityDoesNotExistError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + + #[test] + fn test_token_config_update_by_owner_change_admin_to_a_non_existent_group_error() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) 
= + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::ContractOwner, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupplyControlGroup( + AuthorizedActionTakers::Group(0), + ), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::NewAuthorizedActionTakerGroupDoesNotExistError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + + #[test] + fn 
test_token_config_update_by_owner_change_admin_to_main_group_not_set_error() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::ContractOwner, + admin_action_takers: AuthorizedActionTakers::ContractOwner, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + None, + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupplyControlGroup( + AuthorizedActionTakers::MainGroup, + ), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + 
+ assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError( + StateError::NewAuthorizedActionTakerMainGroupNotSetError(_) + ), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + } + + mod with_group { + use super::*; + use dpp::data_contract::associated_token::token_configuration_localization::v0::TokenConfigurationLocalizationV0; + + #[test] + fn test_token_config_update_by_group_member_changing_total_max_supply_not_using_group_gives_error( + ) { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, _, _) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (identity_2.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + 
TokenConfigurationChangeItem::MaxSupply(Some(1000000)), + None, + None, + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!(updated_token_config.max_supply(), None); + } + + #[test] + fn test_token_config_update_by_group_member_changing_total_max_supply() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) 
= + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_max_supply_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::NoOne, + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: false, + }, + )); + }), + Some( + [( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), (identity_2.id(), 1)].into(), + required_power: 2, + }), + )] + .into(), + ), + platform_version, + ); + + let action_id = TokenConfigUpdateTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + TokenConfigurationChangeItem::MaxSupply(Some(1000000)).u8_item_index(), + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupply(Some(1000000)), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + 
assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let new_contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = new_contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!(updated_token_config.max_supply(), None); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::MaxSupply(Some(1000000)), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id, + action_is_proposer: false, + }, + ), + ), + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit 
transaction"); + + let new_contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = new_contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!(updated_token_config.max_supply(), Some(1000000)); + } + + #[test] + fn test_token_config_change_own_admin_group_give_control_power_and_change_admin_back() { + let platform_version = PlatformVersion::latest(); + let mut platform = TestPlatformBuilder::new() + .with_latest_protocol_version() + .build_with_mock_rpc() + .set_genesis_state(); + + let mut rng = StdRng::seed_from_u64(49853); + + let platform_state = platform.state.load(); + + let (identity, signer, key) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_2, signer_2, key_2) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_3, signer_3, key_3) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_4, signer_4, key_4) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (identity_5, signer_5, key_5) = + setup_identity(&mut platform, rng.gen(), dash_to_credits!(0.5)); + + let (contract, token_id) = create_token_contract_with_owner_identity( + &mut platform, + identity.id(), + Some(|token_configuration: &mut TokenConfiguration| { + token_configuration.set_conventions_change_rules(ChangeControlRules::V0( + ChangeControlRulesV0 { + authorized_to_make_change: AuthorizedActionTakers::Group(0), + admin_action_takers: AuthorizedActionTakers::Group(1), + changing_authorized_action_takers_to_no_one_allowed: false, + changing_admin_action_takers_to_no_one_allowed: false, + self_changing_admin_action_takers_allowed: true, + }, + )); + }), + Some( + [ + ( + 0, + Group::V0(GroupV0 { + members: [(identity.id(), 1), 
(identity_2.id(), 1)].into(), + required_power: 2, + }), + ), + ( + 1, + Group::V0(GroupV0 { + members: [ + (identity_3.id(), 1), + (identity_4.id(), 1), + (identity_5.id(), 1), + ] + .into(), + required_power: 2, + }), + ), + ] + .into(), + ), + platform_version, + ); + + let action_id = TokenConfigUpdateTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity_3.id().as_bytes(), + 2, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(0), + ) + .u8_item_index(), + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_3.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(0), + ), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(1)), + &key_3, + 2, + 0, + &signer_3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let new_contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + 
.expect("expected contract"); + let updated_token_config = new_contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!( + updated_token_config + .conventions_change_rules() + .admin_action_takers(), + &AuthorizedActionTakers::Group(1) + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_4.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(0), + ), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 1, + action_id, + action_is_proposer: false, + }, + ), + ), + &key_4, + 2, + 0, + &signer_4, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let new_contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = new_contract + .contract + .expected_token_configuration(0) + 
.expect("expected token configuration"); + assert_eq!( + updated_token_config + .conventions_change_rules() + .admin_action_takers(), + &AuthorizedActionTakers::Group(0) + ); + assert_eq!(new_contract.contract.version(), 2); + + // 5 is late to the game, admin control has already been transferred, he should get an error + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_5.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(0), + ), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 1, + action_id, + action_is_proposer: false, + }, + ), + ), + &key_5, + 2, + 0, + &signer_5, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::GroupActionAlreadyCompletedError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + // Let's try if he proposes it now + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_5.id(), + contract.id(), + 0, + 
TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(0), + ), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(1)), + &key_5, + 3, + 0, + &signer_5, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + // Now let's have Group 0 change the control of the conventions to identity 2 only + + let action_id_change_control = + TokenConfigUpdateTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 2, + TokenConfigurationChangeItem::ConventionsControlGroup( + AuthorizedActionTakers::Identity(identity_2.id()), + ) + .u8_item_index(), + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsControlGroup( + AuthorizedActionTakers::Identity(identity_2.id()), + ), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 2, + 0, + &signer, + platform_version, + None, + None, + None, + ) + 
.expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsControlGroup( + AuthorizedActionTakers::Identity(identity_2.id()), + ), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id: action_id_change_control, + action_is_proposer: false, + }, + ), + ), + &key_2, + 2, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process 
state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let new_contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = new_contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!( + updated_token_config + .conventions_change_rules() + .authorized_to_make_change_action_takers(), + &AuthorizedActionTakers::Identity(identity_2.id()) + ); + assert_eq!(new_contract.contract.version(), 3); + + // Now let's have Group 0 hand it back to Group 1 + + let action_id_return = TokenConfigUpdateTransition::calculate_action_id_with_fields( + token_id.as_bytes(), + identity.id().as_bytes(), + 3, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(1), + ) + .u8_item_index(), + ); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(1), + ), + None, + Some(GroupStateTransitionInfoStatus::GroupStateTransitionInfoProposer(0)), + &key, + 3, + 0, + &signer, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + 
&vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::ConventionsAdminGroup( + AuthorizedActionTakers::Group(1), + ), + None, + Some( + GroupStateTransitionInfoStatus::GroupStateTransitionInfoOtherSigner( + GroupStateTransitionInfo { + group_contract_position: 0, + action_id: action_id_return, + action_is_proposer: false, + }, + ), + ), + &key_2, + 3, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + + let new_contract = platform + .drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, + 
platform_version, + ) + .unwrap() + .expect("expected to fetch token balance") + .expect("expected contract"); + let updated_token_config = new_contract + .contract + .expected_token_configuration(0) + .expect("expected token configuration"); + assert_eq!( + updated_token_config + .conventions_change_rules() + .admin_action_takers(), + &AuthorizedActionTakers::Group(1) + ); + assert_eq!(new_contract.contract.version(), 4); + + // Not let's try identity 3 to change the conventions (should fail) + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_3.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::Conventions(TokenConfigurationConvention::V0( + TokenConfigurationConventionV0 { + localizations: [( + "en".to_string(), + TokenConfigurationLocalizationV0 { + should_capitalize: true, + singular_form: "garzon".to_string(), + plural_form: "garzons".to_string(), + } + .into(), + )] + .into(), + decimals: 8, + }, + )), + None, + None, + &key_3, + 3, + 0, + &signer_3, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::PaidConsensusError( + ConsensusError::StateError(StateError::UnauthorizedTokenActionError(_)), + _ + )] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected 
to commit transaction"); + + // Not let's try identity 2 to change the conventions (should succeed) + + let config_update_transition = BatchTransition::new_token_config_update_transition( + token_id, + identity_2.id(), + contract.id(), + 0, + TokenConfigurationChangeItem::Conventions(TokenConfigurationConvention::V0( + TokenConfigurationConventionV0 { + localizations: [( + "en".to_string(), + TokenConfigurationLocalization::V0( + TokenConfigurationLocalizationV0 { + should_capitalize: true, + singular_form: "garzon".to_string(), + plural_form: "garzons".to_string(), + }, + ), + )] + .into(), + decimals: 8, + }, + )), + None, + None, + &key_2, + 4, + 0, + &signer_2, + platform_version, + None, + None, + None, + ) + .expect("expect to create documents batch transition"); + + let config_update_transition_serialized_transition = config_update_transition + .serialize_to_bytes() + .expect("expected documents batch serialized state transition"); + + let transaction = platform.drive.grove.start_transaction(); + + let processing_result = platform + .platform + .process_raw_state_transitions( + &vec![config_update_transition_serialized_transition.clone()], + &platform_state, + &BlockInfo::default(), + &transaction, + platform_version, + false, + None, + ) + .expect("expected to process state transition"); + + assert_matches!( + processing_result.execution_results().as_slice(), + [StateTransitionExecutionResult::SuccessfulExecution(_, _)] + ); + + platform + .drive + .grove + .commit_transaction(transaction) + .unwrap() + .expect("expected to commit transaction"); + } + } + } +} diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/transformer/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/transformer/v0/mod.rs index 4828d923f8d..7ed136e6e2c 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/transformer/v0/mod.rs +++ 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/batch/transformer/v0/mod.rs @@ -60,6 +60,7 @@ use drive::state_transition_action::batch::batched_transition::token_transition: use drive::state_transition_action::batch::batched_transition::token_transition::token_emergency_action_transition_action::TokenEmergencyActionTransitionAction; use drive::state_transition_action::batch::batched_transition::token_transition::token_freeze_transition_action::TokenFreezeTransitionAction; use drive::state_transition_action::batch::batched_transition::token_transition::token_mint_transition_action::TokenMintTransitionAction; +use drive::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::TokenClaimTransitionAction; use drive::state_transition_action::batch::batched_transition::token_transition::token_transfer_transition_action::TokenTransferTransitionAction; use drive::state_transition_action::batch::batched_transition::token_transition::token_unfreeze_transition_action::TokenUnfreezeTransitionAction; use drive::state_transition_action::batch::batched_transition::token_transition::TokenTransitionAction; @@ -584,6 +585,16 @@ impl BatchTransitionInternalTransformerV0 for BatchTransition { Ok(data_contract_fetch_info.clone()) }, platform_version)?; + execution_context + .add_operation(ValidationOperation::PrecalculatedOperation(fee_result)); + + Ok(batched_action) + } + TokenTransition::Claim(release) => { + let (batched_action, fee_result) = TokenClaimTransitionAction::try_from_borrowed_token_claim_transition_with_contract_lookup(drive, owner_id, release, approximate_for_costs, transaction, block_info, user_fee_increase, |_identifier| { + Ok(data_contract_fetch_info.clone()) + }, platform_version)?; + execution_context .add_operation(ValidationOperation::PrecalculatedOperation(fee_result)); diff --git 
a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/mod.rs index 7069e90904f..b411f716051 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/mod.rs @@ -43,7 +43,7 @@ impl StateTransitionActionTransformerV0 for DataContractCreateTransition { fn transform_into_action( &self, platform: &PlatformRef, - _block_info: &BlockInfo, + block_info: &BlockInfo, validation_mode: ValidationMode, execution_context: &mut StateTransitionExecutionContext, _tx: TransactionArg, @@ -58,6 +58,7 @@ impl StateTransitionActionTransformerV0 for DataContractCreateTransition { .transform_into_action { 0 => self.transform_into_action_v0::( + block_info, validation_mode, execution_context, platform_version, @@ -124,8 +125,8 @@ impl StateTransitionStateValidationV0 for DataContractCreateTransition { { 0 => self.validate_state_v0( platform, + block_info, validation_mode, - &block_info.epoch, tx, execution_context, platform_version, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/state/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/state/v0/mod.rs index 3385fd125d8..7534edc0c3d 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/state/v0/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_create/state/v0/mod.rs @@ -1,6 +1,7 @@ use crate::error::Error; use crate::platform_types::platform::PlatformRef; use crate::rpc::core::CoreRPCLike; +use dpp::block::block_info::BlockInfo; use 
dpp::block::epoch::Epoch; use dpp::consensus::state::data_contract::data_contract_already_present_error::DataContractAlreadyPresentError; @@ -26,8 +27,8 @@ pub(in crate::execution::validation::state_transition::state_transitions::data_c fn validate_state_v0( &self, platform: &PlatformRef, + block_info: &BlockInfo, validation_mode: ValidationMode, - epoch: &Epoch, tx: TransactionArg, execution_context: &mut StateTransitionExecutionContext, platform_version: &PlatformVersion, @@ -35,6 +36,7 @@ pub(in crate::execution::validation::state_transition::state_transitions::data_c fn transform_into_action_v0( &self, + block_info: &BlockInfo, validation_mode: ValidationMode, execution_context: &mut StateTransitionExecutionContext, platform_version: &PlatformVersion, @@ -45,13 +47,14 @@ impl DataContractCreateStateTransitionStateValidationV0 for DataContractCreateTr fn validate_state_v0( &self, platform: &PlatformRef, + block_info: &BlockInfo, validation_mode: ValidationMode, - epoch: &Epoch, tx: TransactionArg, execution_context: &mut StateTransitionExecutionContext, platform_version: &PlatformVersion, ) -> Result, Error> { let action = self.transform_into_action_v0::( + block_info, validation_mode, execution_context, platform_version, @@ -63,7 +66,7 @@ impl DataContractCreateStateTransitionStateValidationV0 for DataContractCreateTr let contract_fetch_info = platform.drive.get_contract_with_fetch_info_and_fee( self.data_contract().id().to_buffer(), - Some(epoch), + Some(&block_info.epoch), false, tx, platform_version, @@ -98,6 +101,7 @@ impl DataContractCreateStateTransitionStateValidationV0 for DataContractCreateTr fn transform_into_action_v0( &self, + block_info: &BlockInfo, validation_mode: ValidationMode, execution_context: &mut StateTransitionExecutionContext, platform_version: &PlatformVersion, @@ -108,6 +112,7 @@ impl DataContractCreateStateTransitionStateValidationV0 for DataContractCreateTr // The contract in serialized form into it's execution form let result = 
DataContractCreateTransitionAction::try_from_borrowed_transition( self, + block_info, validation_mode.should_fully_validate_contract_on_transform_into_action(), &mut validation_operations, platform_version, @@ -227,8 +232,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), None, &mut execution_context, platform_version, @@ -320,8 +325,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), None, &mut execution_context, platform_version, @@ -403,8 +408,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), None, &mut execution_context, platform_version, @@ -470,8 +475,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), None, &mut execution_context, platform_version, @@ -534,6 +539,7 @@ mod tests { let result = transition .transform_into_action_v0::( + &BlockInfo::default(), ValidationMode::Validator, &mut execution_context, platform_version, @@ -588,6 +594,7 @@ mod tests { let result = transition .transform_into_action_v0::( + &BlockInfo::default(), ValidationMode::Validator, &mut execution_context, platform_version, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/mod.rs index a4abb0b9040..32b04b772a8 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/mod.rs @@ -25,7 +25,7 @@ impl StateTransitionActionTransformerV0 for 
DataContractUpdateTransition { fn transform_into_action( &self, platform: &PlatformRef, - _block_info: &BlockInfo, + block_info: &BlockInfo, validation_mode: ValidationMode, execution_context: &mut StateTransitionExecutionContext, _tx: TransactionArg, @@ -39,9 +39,12 @@ impl StateTransitionActionTransformerV0 for DataContractUpdateTransition { .contract_update_state_transition .transform_into_action { - 0 => { - self.transform_into_action_v0(validation_mode, execution_context, platform_version) - } + 0 => self.transform_into_action_v0( + block_info, + validation_mode, + execution_context, + platform_version, + ), version => Err(Error::Execution(ExecutionError::UnknownVersionMismatch { method: "data contract update transition: transform_into_action".to_string(), known_versions: vec![0], diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/mod.rs index 3d32ae75290..3ad475000a6 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/mod.rs @@ -40,8 +40,8 @@ impl StateTransitionStateValidationV0 for DataContractUpdateTransition { } self.validate_state_v0( platform, + &block_info, validation_mode, - &block_info.epoch, execution_context, tx, platform_version, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/v0/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/v0/mod.rs index 5435c8ad18d..c44713240fb 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/v0/mod.rs +++ 
b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/data_contract_update/state/v0/mod.rs @@ -1,13 +1,14 @@ use crate::error::Error; use crate::platform_types::platform::PlatformRef; use crate::rpc::core::CoreRPCLike; +use dpp::block::block_info::BlockInfo; use dpp::block::epoch::Epoch; use dpp::consensus::basic::document::DataContractNotPresentError; use dpp::consensus::basic::BasicError; use dpp::data_contract::accessors::v0::DataContractV0Getters; - +use dpp::data_contract::accessors::v1::{DataContractV1Getters, DataContractV1Setters}; use dpp::data_contract::validate_update::DataContractUpdateValidationMethodsV0; use dpp::prelude::ConsensusValidationResult; @@ -32,8 +33,8 @@ pub(in crate::execution::validation::state_transition::state_transitions::data_c fn validate_state_v0( &self, platform: &PlatformRef, + block_info: &BlockInfo, validation_mode: ValidationMode, - epoch: &Epoch, execution_context: &mut StateTransitionExecutionContext, tx: TransactionArg, platform_version: &PlatformVersion, @@ -41,6 +42,7 @@ pub(in crate::execution::validation::state_transition::state_transitions::data_c fn transform_into_action_v0( &self, + block_info: &BlockInfo, validation_mode: ValidationMode, execution_context: &mut StateTransitionExecutionContext, platform_version: &PlatformVersion @@ -51,20 +53,24 @@ impl DataContractUpdateStateTransitionStateValidationV0 for DataContractUpdateTr fn validate_state_v0( &self, platform: &PlatformRef, + block_info: &BlockInfo, validation_mode: ValidationMode, - epoch: &Epoch, execution_context: &mut StateTransitionExecutionContext, tx: TransactionArg, platform_version: &PlatformVersion, ) -> Result, Error> { - let action = - self.transform_into_action_v0(validation_mode, execution_context, platform_version)?; + let mut action = self.transform_into_action_v0( + block_info, + validation_mode, + execution_context, + platform_version, + )?; if !action.is_valid() { return Ok(action); } - let 
state_transition_action = action.data.as_ref().ok_or(Error::Execution( + let state_transition_action = action.data.as_mut().ok_or(Error::Execution( ExecutionError::CorruptedCodeExecution( "we should always have an action at this point in data contract update", ), @@ -72,7 +78,7 @@ impl DataContractUpdateStateTransitionStateValidationV0 for DataContractUpdateTr let new_data_contract = match state_transition_action { StateTransitionAction::DataContractUpdateAction(action) => { - Some(action.data_contract_ref()) + Some(action.data_contract_mut()) } _ => None, } @@ -92,7 +98,7 @@ impl DataContractUpdateStateTransitionStateValidationV0 for DataContractUpdateTr let data_contract_fetch_info = drive.get_contract_with_fetch_info_and_fee( new_data_contract.id().to_buffer(), - Some(epoch), + Some(&block_info.epoch), add_to_cache_if_pulled, tx, platform_version, @@ -144,11 +150,16 @@ impl DataContractUpdateStateTransitionStateValidationV0 for DataContractUpdateTr )); } + new_data_contract.set_created_at(old_data_contract.created_at()); + new_data_contract.set_created_at_block_height(old_data_contract.created_at_block_height()); + new_data_contract.set_created_at_epoch(old_data_contract.created_at_epoch()); + Ok(action) } fn transform_into_action_v0( &self, + block_info: &BlockInfo, validation_mode: ValidationMode, execution_context: &mut StateTransitionExecutionContext, platform_version: &PlatformVersion, @@ -157,6 +168,7 @@ impl DataContractUpdateStateTransitionStateValidationV0 for DataContractUpdateTr let result = DataContractUpdateTransitionAction::try_from_borrowed_transition( self, + block_info, validation_mode.should_fully_validate_contract_on_transform_into_action(), &mut validation_operations, platform_version, @@ -266,8 +278,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), &mut execution_context, None, platform_version, @@ -341,8 +353,8 @@ mod tests { let result = 
transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), &mut execution_context, None, platform_version, @@ -426,8 +438,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), &mut execution_context, None, platform_version, @@ -512,8 +524,8 @@ mod tests { let result = transition .validate_state_v0::( &platform_ref, + &BlockInfo::default(), ValidationMode::Validator, - &Epoch::default(), &mut execution_context, None, platform_version, @@ -581,6 +593,7 @@ mod tests { let result = transition .transform_into_action_v0( + &BlockInfo::default(), ValidationMode::Validator, &mut execution_context, platform_version, @@ -639,6 +652,7 @@ mod tests { let result = transition .transform_into_action_v0( + &BlockInfo::default(), ValidationMode::Validator, &mut execution_context, platform_version, diff --git a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/mod.rs b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/mod.rs index 0c7b345be77..5959bbc5465 100644 --- a/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/mod.rs +++ b/packages/rs-drive-abci/src/execution/validation/state_transition/state_transitions/mod.rs @@ -2303,6 +2303,9 @@ pub(in crate::execution) mod tests { Some(data_contract_id.to_buffer()), Some(identity_id.to_buffer()), Some(|data_contract: &mut DataContract| { + data_contract.set_created_at_epoch(Some(0)); + data_contract.set_created_at(Some(0)); + data_contract.set_created_at_block_height(Some(0)); if let Some(token_configuration_modification) = token_configuration_modification { let token_configuration = data_contract .token_configuration_mut(0) diff --git a/packages/rs-drive-abci/src/main.rs b/packages/rs-drive-abci/src/main.rs index 67a68a40914..871333d8c1d 100644 --- 
a/packages/rs-drive-abci/src/main.rs +++ b/packages/rs-drive-abci/src/main.rs @@ -17,7 +17,7 @@ use drive_abci::rpc::core::DefaultCoreRPC; use drive_abci::{logging, server}; use itertools::Itertools; use std::fs::remove_file; -#[cfg(tokio_unstable)] +#[cfg(all(tokio_unstable, feature = "console"))] use std::net::SocketAddr; use std::path::PathBuf; use std::process::ExitCode; @@ -27,9 +27,9 @@ use tokio::runtime::{Builder, Runtime}; use tokio::signal::unix::{signal, SignalKind}; use tokio::time::Duration; use tokio_util::sync::CancellationToken; -#[cfg(tokio_unstable)] +#[cfg(all(tokio_unstable, feature = "console"))] use tracing_subscriber::layer::SubscriberExt; -#[cfg(tokio_unstable)] +#[cfg(all(tokio_unstable, feature = "console"))] use tracing_subscriber::util::SubscriberInitExt; const SHUTDOWN_TIMEOUT_MILIS: u64 = 5000; // 5s; Docker defaults to 10s diff --git a/packages/rs-drive-abci/src/query/group_queries/group_action_signers/v0/mod.rs b/packages/rs-drive-abci/src/query/group_queries/group_action_signers/v0/mod.rs index ed1ca8c424b..591ae1a3553 100644 --- a/packages/rs-drive-abci/src/query/group_queries/group_action_signers/v0/mod.rs +++ b/packages/rs-drive-abci/src/query/group_queries/group_action_signers/v0/mod.rs @@ -29,7 +29,6 @@ impl Platform { platform_state: &PlatformState, platform_version: &PlatformVersion, ) -> Result, Error> { - let config = &self.config.drive; let contract_id: Identifier = check_validation_result_with_data!(contract_id.try_into().map_err(|_| { QueryError::InvalidArgument( diff --git a/packages/rs-drive-abci/src/query/group_queries/group_actions/v0/mod.rs b/packages/rs-drive-abci/src/query/group_queries/group_actions/v0/mod.rs index d9d8aa09b32..d714a52b8c8 100644 --- a/packages/rs-drive-abci/src/query/group_queries/group_actions/v0/mod.rs +++ b/packages/rs-drive-abci/src/query/group_queries/group_actions/v0/mod.rs @@ -8,7 +8,7 @@ use dapi_grpc::platform::v0::get_group_actions_response::get_group_actions_respo 
emergency_action_event, group_action_event, token_event, BurnEvent, DestroyFrozenFundsEvent, EmergencyActionEvent, FreezeEvent, GroupActionEntry, GroupActionEvent, GroupActions, MintEvent, PersonalEncryptedNote, SharedEncryptedNote, TokenConfigUpdateEvent, - TokenEvent as TokenEventResponse, TransferEvent, UnfreezeEvent, + TokenEvent as TokenEventResponse, UnfreezeEvent, }; use dapi_grpc::platform::v0::get_group_actions_response::{ get_group_actions_response_v0, GetGroupActionsResponseV0, @@ -181,36 +181,8 @@ impl Platform { )), }) } - TokenEvent::Transfer( - recipient_id, - public_note, - shared_encrypted_note, - personal_encrypted_note, - amount, - ) => { - group_action_event::EventType::TokenEvent(TokenEventResponse { - r#type: Some(token_event::Type::Transfer(TransferEvent { - recipient_id: recipient_id.to_vec(), - public_note, - shared_encrypted_note: shared_encrypted_note - .map(|(sender_key_index, recipient_key_index, note)| { - SharedEncryptedNote { - sender_key_index, - recipient_key_index, - encrypted_data: note, - } - }), - personal_encrypted_note: personal_encrypted_note - .map(|(root_encryption_key_index, derivation_encryption_key_index, note)| { - PersonalEncryptedNote { - root_encryption_key_index, - derivation_encryption_key_index, - encrypted_data: note, - } - }), - amount: amount as u64, - })), - }) + TokenEvent::Transfer(..) => { + return None; }, TokenEvent::EmergencyAction(action, public_note) => { group_action_event::EventType::TokenEvent(TokenEventResponse { @@ -231,6 +203,9 @@ impl Platform { })), }) } + TokenEvent::Claim(..) 
=> { + return None; + } }, }, }), diff --git a/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_evonode_ids/v0/mod.rs b/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_evonode_ids/v0/mod.rs index 6780b1a6907..11e627b031f 100644 --- a/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_evonode_ids/v0/mod.rs +++ b/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_evonode_ids/v0/mod.rs @@ -89,7 +89,10 @@ impl Platform { platform_version, )? .into_iter() - .map(|(pro_tx_hash, count)| EvonodeProposedBlocks { pro_tx_hash, count }) + .map(|(pro_tx_hash, count)| EvonodeProposedBlocks { + pro_tx_hash: pro_tx_hash.to_vec(), + count, + }) .collect(); let evonode_proposed_blocks = EvonodesProposedBlocks { diff --git a/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_range/v0/mod.rs b/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_range/v0/mod.rs index 9ba56968ec3..4db48462ab4 100644 --- a/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_range/v0/mod.rs +++ b/packages/rs-drive-abci/src/query/validator_queries/proposed_block_counts_by_range/v0/mod.rs @@ -117,7 +117,10 @@ impl Platform { platform_version, )? 
.into_iter() - .map(|(pro_tx_hash, count)| EvonodeProposedBlocks { pro_tx_hash, count }) + .map(|(pro_tx_hash, count)| EvonodeProposedBlocks { + pro_tx_hash: pro_tx_hash.to_vec(), + count, + }) .collect(); let evonode_proposed_blocks = EvonodesProposedBlocks { diff --git a/packages/rs-drive-abci/tests/strategy_tests/main.rs b/packages/rs-drive-abci/tests/strategy_tests/main.rs index 38fc21253b7..f2ad6e75046 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/main.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/main.rs @@ -2358,135 +2358,159 @@ mod tests { #[test] fn run_chain_insert_many_new_identity_per_block_many_document_insertions_and_deletions_with_epoch_change( ) { - let platform_version = PlatformVersion::latest(); - let created_contract = json_document_to_created_contract( - "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", - 1, - true, - platform_version, - ) - .expect("expected to get contract from a json document"); - - let contract = created_contract.data_contract(); - - let document_insertion_op = DocumentOp { - contract: contract.clone(), - action: DocumentAction::DocumentActionInsertRandom( - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let document_deletion_op = DocumentOp { - contract: contract.clone(), - action: DocumentAction::DocumentActionDelete, - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let strategy = NetworkStrategy { - strategy: Strategy { - start_contracts: vec![(created_contract, None)], - operations: vec![ - Operation { - op_type: OperationType::Document(document_insertion_op), - frequency: Frequency { - times_per_block_range: 1..40, - chance_per_block: None, - }, - }, - Operation { - op_type: 
OperationType::Document(document_deletion_op), - frequency: Frequency { - times_per_block_range: 1..15, - chance_per_block: None, + // Define the desired stack size + let stack_size = 4 * 1024 * 1024; //Let's set the stack size to be higher than the default 2MB + + let builder = std::thread::Builder::new() + .stack_size(stack_size) + .name("custom_stack_size_thread".into()); + + let handler = builder + .spawn(|| { + let platform_version = PlatformVersion::latest(); + let created_contract = json_document_to_created_contract( + "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", + 1, + true, + platform_version, + ) + .expect("expected to get contract from a json document"); + + let contract = created_contract.data_contract(); + + let document_insertion_op = DocumentOp { + contract: contract.clone(), + action: DocumentAction::DocumentActionInsertRandom( + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let document_deletion_op = DocumentOp { + contract: contract.clone(), + action: DocumentAction::DocumentActionDelete, + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let strategy = NetworkStrategy { + strategy: Strategy { + start_contracts: vec![(created_contract, None)], + operations: vec![ + Operation { + op_type: OperationType::Document(document_insertion_op), + frequency: Frequency { + times_per_block_range: 1..40, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_deletion_op), + frequency: Frequency { + times_per_block_range: 1..15, + chance_per_block: None, + }, + }, + ], + start_identities: StartIdentities::default(), + identity_inserts: IdentityInsertInfo { + frequency: Frequency { + 
times_per_block_range: 1..30, + chance_per_block: None, + }, + start_keys: 5, + extra_keys: Default::default(), + start_balance_range: dash_to_duffs!(1)..=dash_to_duffs!(1), }, - }, - ], - start_identities: StartIdentities::default(), - identity_inserts: IdentityInsertInfo { - frequency: Frequency { - times_per_block_range: 1..30, - chance_per_block: None, - }, - start_keys: 5, - extra_keys: Default::default(), - start_balance_range: dash_to_duffs!(1)..=dash_to_duffs!(1), - }, - identity_contract_nonce_gaps: None, - signer: None, - }, - total_hpmns: 100, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: None, - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() - }; - - let day_in_ms = 1000 * 60 * 60 * 24; + identity_contract_nonce_gaps: None, + signer: None, + }, + total_hpmns: 100, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: None, + + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, + ..Default::default() + }; - let config = PlatformConfig { - validator_set: ValidatorSetConfig::default_100_67(), - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - verify_sum_trees: true, + let day_in_ms = 1000 * 60 * 60 * 24; - epoch_time_length_s: 1576800, - ..Default::default() - }, - block_spacing_ms: day_in_ms, - testing_configs: PlatformTestConfig::default_minimal_verifications(), - ..Default::default() - }; - let block_count = 30; - let mut platform = TestPlatformBuilder::new() - .with_config(config.clone()) - .build_with_mock_rpc(); + let config = PlatformConfig { + validator_set: ValidatorSetConfig::default_100_67(), + chain_lock: 
ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + verify_sum_trees: true, - let outcome = - run_chain_for_strategy(&mut platform, block_count, strategy, config, 15, &mut None); - assert_eq!(outcome.identities.len() as u64, 472); - assert_eq!(outcome.masternode_identity_balances.len(), 100); - let balance_count = outcome - .masternode_identity_balances - .into_iter() - .filter(|(_, balance)| *balance != 0) - .count(); - assert_eq!(balance_count, 19); // 1 epoch worth of proposers + epoch_time_length_s: 1576800, + ..Default::default() + }, + block_spacing_ms: day_in_ms, + testing_configs: PlatformTestConfig::default_minimal_verifications(), + ..Default::default() + }; + let block_count = 30; + let mut platform = TestPlatformBuilder::new() + .with_config(config.clone()) + .build_with_mock_rpc(); + + let outcome = run_chain_for_strategy( + &mut platform, + block_count, + strategy, + config, + 15, + &mut None, + ); + assert_eq!(outcome.identities.len() as u64, 472); + assert_eq!(outcome.masternode_identity_balances.len(), 100); + let balance_count = outcome + .masternode_identity_balances + .into_iter() + .filter(|(_, balance)| *balance != 0) + .count(); + assert_eq!(balance_count, 19); // 1 epoch worth of proposers - let issues = outcome - .abci_app - .platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, &platform_version.drive.grove_version) - .expect("expected to have no issues"); + let issues = outcome + .abci_app + .platform + .drive + .grove + .visualize_verify_grovedb( + None, + true, + false, + &platform_version.drive.grove_version, + ) + .expect("expected to have no issues"); - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", 
hash, a, b, c)) + .collect::>() + .join(" | ") + ); + }) + .expect("Failed to create thread with custom stack size"); + // Wait for the thread to finish and assert that it didn't panic. + handler.join().expect("Thread has panicked"); } #[test] @@ -2767,298 +2791,341 @@ mod tests { #[test] fn run_chain_insert_many_new_identity_per_block_many_document_insertions_updates_and_deletions_with_epoch_change( ) { - let platform_version = PlatformVersion::latest(); - let created_contract = json_document_to_created_contract( - "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", - 1, - true, - platform_version, - ) - .expect("expected to get contract from a json document"); - - let contract = created_contract.data_contract(); - - let document_insertion_op = DocumentOp { - contract: contract.clone(), - action: DocumentAction::DocumentActionInsertRandom( - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let document_replace_op = DocumentOp { - contract: contract.clone(), - action: DocumentActionReplaceRandom, - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let document_deletion_op = DocumentOp { - contract: contract.clone(), - action: DocumentAction::DocumentActionDelete, - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let strategy = NetworkStrategy { - strategy: Strategy { - start_contracts: vec![(created_contract, None)], - operations: vec![ - Operation { - op_type: OperationType::Document(document_insertion_op), - frequency: Frequency { - times_per_block_range: 1..40, - chance_per_block: None, - }, - }, - Operation { - op_type: 
OperationType::Document(document_replace_op), - frequency: Frequency { - times_per_block_range: 1..5, - chance_per_block: None, - }, - }, - Operation { - op_type: OperationType::Document(document_deletion_op), - frequency: Frequency { - times_per_block_range: 1..5, - chance_per_block: None, + // Define the desired stack size + let stack_size = 4 * 1024 * 1024; //Let's set the stack size to be higher than the default 2MB + + let builder = std::thread::Builder::new() + .stack_size(stack_size) + .name("custom_stack_size_thread".into()); + + let handler = builder + .spawn(|| { + let platform_version = PlatformVersion::latest(); + let created_contract = json_document_to_created_contract( + "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", + 1, + true, + platform_version, + ) + .expect("expected to get contract from a json document"); + + let contract = created_contract.data_contract(); + + let document_insertion_op = DocumentOp { + contract: contract.clone(), + action: DocumentAction::DocumentActionInsertRandom( + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let document_replace_op = DocumentOp { + contract: contract.clone(), + action: DocumentActionReplaceRandom, + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let document_deletion_op = DocumentOp { + contract: contract.clone(), + action: DocumentAction::DocumentActionDelete, + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let strategy = NetworkStrategy { + strategy: Strategy { + start_contracts: vec![(created_contract, None)], + operations: vec![ + Operation { + op_type: 
OperationType::Document(document_insertion_op), + frequency: Frequency { + times_per_block_range: 1..40, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_replace_op), + frequency: Frequency { + times_per_block_range: 1..5, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_deletion_op), + frequency: Frequency { + times_per_block_range: 1..5, + chance_per_block: None, + }, + }, + ], + start_identities: StartIdentities::default(), + identity_inserts: IdentityInsertInfo { + frequency: Frequency { + times_per_block_range: 1..6, + chance_per_block: None, + }, + start_keys: 5, + extra_keys: Default::default(), + start_balance_range: dash_to_duffs!(1)..=dash_to_duffs!(1), }, - }, - ], - start_identities: StartIdentities::default(), - identity_inserts: IdentityInsertInfo { - frequency: Frequency { - times_per_block_range: 1..6, - chance_per_block: None, - }, - start_keys: 5, - extra_keys: Default::default(), - start_balance_range: dash_to_duffs!(1)..=dash_to_duffs!(1), - }, - identity_contract_nonce_gaps: None, - signer: None, - }, - total_hpmns: 100, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: None, - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() - }; + identity_contract_nonce_gaps: None, + signer: None, + }, + total_hpmns: 100, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: None, + + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, + ..Default::default() + }; - let day_in_ms = 1000 * 60 * 60 * 24; + let day_in_ms = 1000 * 60 * 60 * 24; - let config = PlatformConfig { - validator_set: ValidatorSetConfig::default_100_67(), - chain_lock: 
ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - verify_sum_trees: true, - - epoch_time_length_s: 1576800, - ..Default::default() - }, - block_spacing_ms: day_in_ms, - testing_configs: PlatformTestConfig::default_minimal_verifications(), - ..Default::default() - }; - let block_count = 100; - let mut platform = TestPlatformBuilder::new() - .with_config(config.clone()) - .build_with_mock_rpc(); + let config = PlatformConfig { + validator_set: ValidatorSetConfig::default_100_67(), + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + verify_sum_trees: true, - let outcome = - run_chain_for_strategy(&mut platform, block_count, strategy, config, 15, &mut None); - assert_eq!(outcome.identities.len() as u64, 296); - assert_eq!(outcome.masternode_identity_balances.len(), 100); - let balance_count = outcome - .masternode_identity_balances - .into_iter() - .filter(|(_, balance)| *balance != 0) - .count(); - assert_eq!(balance_count, 92); // 1 epoch worth of proposers + epoch_time_length_s: 1576800, + ..Default::default() + }, + block_spacing_ms: day_in_ms, + testing_configs: PlatformTestConfig::default_minimal_verifications(), + ..Default::default() + }; + let block_count = 100; + let mut platform = TestPlatformBuilder::new() + .with_config(config.clone()) + .build_with_mock_rpc(); + + let outcome = run_chain_for_strategy( + &mut platform, + block_count, + strategy, + config, + 15, + &mut None, + ); + assert_eq!(outcome.identities.len() as u64, 296); + assert_eq!(outcome.masternode_identity_balances.len(), 100); + let balance_count = outcome + .masternode_identity_balances + .into_iter() + .filter(|(_, balance)| *balance != 0) + .count(); + assert_eq!(balance_count, 92); // 1 epoch worth of proposers - let issues = outcome - .abci_app - .platform - .drive - .grove - .visualize_verify_grovedb(None, true, false, 
&platform_version.drive.grove_version) - .expect("expected to have no issues"); + let issues = outcome + .abci_app + .platform + .drive + .grove + .visualize_verify_grovedb( + None, + true, + false, + &platform_version.drive.grove_version, + ) + .expect("expected to have no issues"); - assert_eq!( - issues.len(), - 0, - "issues are {}", - issues - .iter() - .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) - .collect::>() - .join(" | ") - ); + assert_eq!( + issues.len(), + 0, + "issues are {}", + issues + .iter() + .map(|(hash, (a, b, c))| format!("{}: {} {} {}", hash, a, b, c)) + .collect::>() + .join(" | ") + ); + }) + .expect("Failed to create thread with custom stack size"); + // Wait for the thread to finish and assert that it didn't panic. + handler.join().expect("Thread has panicked"); } #[test] fn run_chain_insert_many_new_identity_per_block_many_document_insertions_updates_transfers_and_deletions_with_epoch_change( ) { - let platform_version = PlatformVersion::latest(); - let created_contract = json_document_to_created_contract( - "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", - 1, - true, - platform_version, - ) - .expect("expected to get contract from a json document"); - - let contract = created_contract.data_contract(); - - let document_insertion_op = DocumentOp { - contract: contract.clone(), - action: DocumentAction::DocumentActionInsertRandom( - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let document_replace_op = DocumentOp { - contract: contract.clone(), - action: DocumentActionReplaceRandom, - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let document_transfer_op = DocumentOp { - contract: 
contract.clone(), - action: DocumentActionTransferRandom, - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let document_deletion_op = DocumentOp { - contract: contract.clone(), - action: DocumentAction::DocumentActionDelete, - document_type: contract - .document_type_for_name("contactRequest") - .expect("expected a profile document type") - .to_owned_document_type(), - }; - - let strategy = NetworkStrategy { - strategy: Strategy { - start_contracts: vec![(created_contract, None)], - operations: vec![ - Operation { - op_type: OperationType::Document(document_insertion_op), - frequency: Frequency { - times_per_block_range: 1..10, - chance_per_block: None, - }, - }, - Operation { - op_type: OperationType::Document(document_replace_op), - frequency: Frequency { - times_per_block_range: 1..5, - chance_per_block: None, - }, - }, - Operation { - op_type: OperationType::Document(document_transfer_op), - frequency: Frequency { - times_per_block_range: 1..5, - chance_per_block: None, - }, - }, - Operation { - op_type: OperationType::Document(document_deletion_op), - frequency: Frequency { - times_per_block_range: 1..5, - chance_per_block: None, + // Define the desired stack size + let stack_size = 4 * 1024 * 1024; //Let's set the stack size to be higher than the default 2MB + + let builder = std::thread::Builder::new() + .stack_size(stack_size) + .name("custom_stack_size_thread".into()); + + let handler = builder + .spawn(|| { + let platform_version = PlatformVersion::latest(); + let created_contract = json_document_to_created_contract( + "tests/supporting_files/contract/dashpay/dashpay-contract-all-mutable.json", + 1, + true, + platform_version, + ) + .expect("expected to get contract from a json document"); + + let contract = created_contract.data_contract(); + + let document_insertion_op = DocumentOp { + contract: contract.clone(), + action: 
DocumentAction::DocumentActionInsertRandom( + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let document_replace_op = DocumentOp { + contract: contract.clone(), + action: DocumentActionReplaceRandom, + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let document_transfer_op = DocumentOp { + contract: contract.clone(), + action: DocumentActionTransferRandom, + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let document_deletion_op = DocumentOp { + contract: contract.clone(), + action: DocumentAction::DocumentActionDelete, + document_type: contract + .document_type_for_name("contactRequest") + .expect("expected a profile document type") + .to_owned_document_type(), + }; + + let strategy = NetworkStrategy { + strategy: Strategy { + start_contracts: vec![(created_contract, None)], + operations: vec![ + Operation { + op_type: OperationType::Document(document_insertion_op), + frequency: Frequency { + times_per_block_range: 1..10, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_replace_op), + frequency: Frequency { + times_per_block_range: 1..5, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_transfer_op), + frequency: Frequency { + times_per_block_range: 1..5, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_deletion_op), + frequency: Frequency { + times_per_block_range: 1..5, + chance_per_block: None, + }, + }, + ], + start_identities: StartIdentities::default(), + identity_inserts: IdentityInsertInfo { + frequency: Frequency { + 
times_per_block_range: 1..6, + chance_per_block: None, + }, + start_keys: 5, + extra_keys: Default::default(), + start_balance_range: dash_to_duffs!(1)..=dash_to_duffs!(1), }, - }, - ], - start_identities: StartIdentities::default(), - identity_inserts: IdentityInsertInfo { - frequency: Frequency { - times_per_block_range: 1..6, - chance_per_block: None, - }, - start_keys: 5, - extra_keys: Default::default(), - start_balance_range: dash_to_duffs!(1)..=dash_to_duffs!(1), - }, - identity_contract_nonce_gaps: None, - signer: None, - }, - total_hpmns: 100, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: None, + identity_contract_nonce_gaps: None, + signer: None, + }, + total_hpmns: 100, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: None, + + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, + ..Default::default() + }; - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() - }; + let day_in_ms = 1000 * 60 * 60 * 24; - let day_in_ms = 1000 * 60 * 60 * 24; + let config = PlatformConfig { + validator_set: ValidatorSetConfig::default_100_67(), + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + verify_sum_trees: true, - let config = PlatformConfig { - validator_set: ValidatorSetConfig::default_100_67(), - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - verify_sum_trees: true, - - epoch_time_length_s: 1576800, - ..Default::default() - }, - block_spacing_ms: day_in_ms, - testing_configs: PlatformTestConfig::default_minimal_verifications(), - ..Default::default() - }; - let 
block_count = 70; - let mut platform = TestPlatformBuilder::new() - .with_config(config.clone()) - .build_with_mock_rpc(); - - let outcome = - run_chain_for_strategy(&mut platform, block_count, strategy, config, 15, &mut None); - assert_eq!(outcome.identities.len() as u64, 201); - assert_eq!(outcome.masternode_identity_balances.len(), 100); - let balance_count = outcome - .masternode_identity_balances - .into_iter() - .filter(|(_, balance)| *balance != 0) - .count(); - assert_eq!(balance_count, 55); // 1 epoch worth of proposers + epoch_time_length_s: 1576800, + ..Default::default() + }, + block_spacing_ms: day_in_ms, + testing_configs: PlatformTestConfig::default_minimal_verifications(), + ..Default::default() + }; + let block_count = 70; + let mut platform = TestPlatformBuilder::new() + .with_config(config.clone()) + .build_with_mock_rpc(); + + let outcome = run_chain_for_strategy( + &mut platform, + block_count, + strategy, + config, + 15, + &mut None, + ); + assert_eq!(outcome.identities.len() as u64, 201); + assert_eq!(outcome.masternode_identity_balances.len(), 100); + let balance_count = outcome + .masternode_identity_balances + .into_iter() + .filter(|(_, balance)| *balance != 0) + .count(); + assert_eq!(balance_count, 55); // 1 epoch worth of proposers + }) + .expect("Failed to create thread with custom stack size"); + // Wait for the thread to finish and assert that it didn't panic. 
+ handler.join().expect("Thread has panicked"); } #[test] diff --git a/packages/rs-drive-abci/tests/strategy_tests/verify_state_transitions.rs b/packages/rs-drive-abci/tests/strategy_tests/verify_state_transitions.rs index a7f8b30c999..324f15baa70 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/verify_state_transitions.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/verify_state_transitions.rs @@ -204,10 +204,13 @@ pub(crate) fn verify_state_transitions_were_or_were_not_executed( platform.state.last_committed_block_info() ); if *was_executed { - assert_eq!( - &contract.expect("expected a contract"), - data_contract_update.data_contract_ref(), - ); + assert!(contract + .expect("expected a contract") + .equal_ignoring_time_fields( + data_contract_update.data_contract_ref(), + platform_version + ) + .expect("expected to be able to check equality"),); } else if contract.is_some() { //there is the possibility that the state transition was not executed and the state is equal to the previous // state, aka there would have been no change anyways, we can discount that for now @@ -268,10 +271,8 @@ pub(crate) fn verify_state_transitions_were_or_were_not_executed( } BatchedTransitionAction::TokenAction(token_transition_action) => { if token_transition_action - .base() - .token_configuration() - .expect("expected token configuration") .keeps_history() + .expect("expected no error in token action keeps history") { // if we keep history we just need to check the historical document proofs_request.documents.push( @@ -507,10 +508,8 @@ pub(crate) fn verify_state_transitions_were_or_were_not_executed( } BatchedTransitionAction::TokenAction(token_transition_action) => { if token_transition_action - .base() - .token_configuration() - .expect("expected token configuration") .keeps_history() + .expect("expected no error in token action keeps history") { let token_id = token_transition_action.base().token_id(); let document_type_name = token_transition_action diff --git 
a/packages/rs-drive-abci/tests/strategy_tests/voting_tests.rs b/packages/rs-drive-abci/tests/strategy_tests/voting_tests.rs index d0e804dfa93..08c4b833bf1 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/voting_tests.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/voting_tests.rs @@ -1702,1130 +1702,1182 @@ mod tests { #[test] fn run_chain_with_voting_after_won_by_identity_with_specialized_funds_distribution() { - // In this test we try to insert two state transitions with the same unique index - // We use the DPNS contract, and we insert two documents both with the same "name" - // This is a common scenario we should see quite often - let config = PlatformConfig { - testing_configs: PlatformTestConfig::default_minimal_verifications(), - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - //we disable document triggers because we are using dpns and dpns needs a preorder - use_document_triggers: false, + // Define the desired stack size + let stack_size = 4 * 1024 * 1024; //Let's set the stack size to be higher than the default 2MB + + let builder = std::thread::Builder::new() + .stack_size(stack_size) + .name("custom_stack_size_thread".into()); + + let handler = builder + .spawn(|| { + // In this test we try to insert two state transitions with the same unique index + // We use the DPNS contract, and we insert two documents both with the same "name" + // This is a common scenario we should see quite often + let config = PlatformConfig { + testing_configs: PlatformTestConfig::default_minimal_verifications(), + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + //we disable document triggers because we are using dpns and dpns needs a preorder + use_document_triggers: false, + + ..Default::default() + }, + block_spacing_ms: 3000, + ..Default::default() + }; + let mut platform = 
TestPlatformBuilder::new() + .with_config(config.clone()) + .build_with_mock_rpc(); + + let platform_version = PlatformVersion::latest(); + + let mut rng = StdRng::seed_from_u64(567); + + let mut simple_signer = SimpleSigner::default(); + + let (mut identity1, keys1) = + Identity::random_identity_with_main_keys_with_private_key::>( + 2, + &mut rng, + platform_version, + ) + .unwrap(); + + simple_signer.add_keys(keys1); + + let (mut identity2, keys2) = + Identity::random_identity_with_main_keys_with_private_key::>( + 2, + &mut rng, + platform_version, + ) + .unwrap(); + + simple_signer.add_keys(keys2); + + let start_identities: Vec<(Identity, Option)> = + create_state_transitions_for_identities( + vec![&mut identity1, &mut identity2], + &(dash_to_duffs!(1)..=dash_to_duffs!(1)), + &simple_signer, + &mut rng, + platform_version, + ) + .into_iter() + .map(|(identity, transition)| (identity, Some(transition))) + .collect(); + + let dpns_contract = platform + .drive + .cache + .system_data_contracts + .load_dpns() + .as_ref() + .clone(); + + let document_type = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type") + .to_owned_document_type(); + + let identity1_id = start_identities.first().unwrap().0.id(); + let identity2_id = start_identities.last().unwrap().0.id(); + let document_op_1 = DocumentOp { + contract: dpns_contract.clone(), + action: DocumentAction::DocumentActionInsertSpecific( + BTreeMap::from([ + ("label".into(), "quantum".into()), + ("normalizedLabel".into(), "quantum".into()), + ("normalizedParentDomainName".into(), "dash".into()), + ( + "records".into(), + BTreeMap::from([("identity", Value::from(identity1_id))]).into(), + ), + ]), + Some(start_identities.first().unwrap().0.id()), + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: document_type.clone(), + }; + + let document_op_2 = DocumentOp { + contract: dpns_contract.clone(), + action: 
DocumentAction::DocumentActionInsertSpecific( + BTreeMap::from([ + ("label".into(), "quantum".into()), + ("normalizedLabel".into(), "quantum".into()), + ("normalizedParentDomainName".into(), "dash".into()), + ( + "records".into(), + BTreeMap::from([( + "identity", + Value::from(start_identities.last().unwrap().0.id()), + )]) + .into(), + ), + ]), + Some(start_identities.last().unwrap().0.id()), + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: document_type.clone(), + }; + + let strategy = NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![ + Operation { + op_type: OperationType::Document(document_op_1), + frequency: Frequency { + times_per_block_range: 1..2, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_op_2), + frequency: Frequency { + times_per_block_range: 1..2, + chance_per_block: None, + }, + }, + ], + start_identities: StartIdentities { + hard_coded: start_identities, + ..Default::default() + }, + identity_inserts: Default::default(), - ..Default::default() - }, - block_spacing_ms: 3000, - ..Default::default() + identity_contract_nonce_gaps: None, + signer: Some(simple_signer), + }, + total_hpmns: 100, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: None, + + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, + ..Default::default() + }; + + let mut voting_signer = Some(SimpleSigner::default()); + + // On the first block we only have identities and contracts + let ChainExecutionOutcome { + abci_app, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions, + end_time_ms, + identity_nonce_counter, + identity_contract_nonce_counter, + state_transition_results_per_block, + .. 
+ } = run_chain_for_strategy( + &mut platform, + 2, + strategy.clone(), + config.clone(), + 15, + &mut voting_signer, + ); + + let platform = abci_app.platform; + + let platform_state = platform.state.load(); + + let state_transitions_block_2 = state_transition_results_per_block + .get(&2) + .expect("expected to get block 2"); + + let first_document_insert_result = &state_transitions_block_2 + .first() + .as_ref() + .expect("expected a document insert") + .1; + assert_eq!(first_document_insert_result.code, 0); + + let second_document_insert_result = &state_transitions_block_2 + .get(1) + .as_ref() + .expect("expected a document insert") + .1; + + assert_eq!(second_document_insert_result.code, 0); // we expect the second to also be insertable as they are both contested + + let block_start = platform_state + .last_committed_block_info() + .as_ref() + .unwrap() + .basic_info() + .height + + 1; + let day_in_ms = 1000 * 60 * 60 * 24; + let config = PlatformConfig { + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + //we disable document triggers because we are using dpns and dpns needs a preorder + use_document_triggers: false, + + ..Default::default() + }, + block_spacing_ms: day_in_ms, + ..Default::default() + }; + + let outcome = continue_chain_for_strategy( + abci_app, + ChainExecutionParameters { + block_start, + core_height_start: 1, + block_count: 16, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions: Some(current_proposer_versions.clone()), + current_identity_nonce_counter: identity_nonce_counter, + current_identity_contract_nonce_counter: identity_contract_nonce_counter, + current_votes: BTreeMap::default(), + start_time_ms: 1681094380000, + current_time_ms: end_time_ms, + current_identities: Vec::new(), + }, + NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![Operation { + 
op_type: OperationType::ResourceVote(ResourceVoteOp { + resolved_vote_poll: + ContestedDocumentResourceVotePollWithContractInfo { + contract: + DataContractOwnedResolvedInfo::OwnedDataContract( + dpns_contract.clone(), + ), + document_type_name: "domain".to_string(), + index_name: "parentNameAndLabel".to_string(), + index_values: vec!["dash".into(), "quantum".into()], + }, + action: VoteAction { + vote_choices_with_weights: vec![ + (ResourceVoteChoice::Abstain, 1), + (ResourceVoteChoice::Lock, 1), + (ResourceVoteChoice::TowardsIdentity(identity1_id), 2), + (ResourceVoteChoice::TowardsIdentity(identity2_id), 10), + ], + }, + }), + frequency: Frequency { + times_per_block_range: 1..3, + chance_per_block: None, + }, + }], + start_identities: StartIdentities::default(), + identity_inserts: Default::default(), + + identity_contract_nonce_gaps: None, + signer: voting_signer, + }, + total_hpmns: 100, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: None, + + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, + ..Default::default() + }, + config.clone(), + StrategyRandomness::SeedEntropy(9), + ); + + let platform = outcome.abci_app.platform; + + // Now let's run a query for the vote totals + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) + .expect("expected to encode the word dash"); + + let quantum_encoded = + bincode::encode_to_vec(Value::Text("quantum".to_string()), config) + .expect("expected to encode the word quantum"); + + let index_name = "parentNameAndLabel".to_string(); + + let query_validation_result = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + 
GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: document_type.name().clone(), + index_name: index_name.clone(), + index_values: vec![ + dash_encoded.clone(), + quantum_encoded.clone(), + ], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: true, + start_at_identifier_info: None, + count: None, + prove: false, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = query_validation_result.version.expect("expected a version"); + + let Some( + get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( + get_contested_resource_vote_state_response_v0::ContestedResourceContenders { + contenders, + abstain_vote_tally, + lock_vote_tally, + finished_vote_info, + }, + ), + ) = result + else { + panic!("expected contenders") }; - let mut platform = TestPlatformBuilder::new() - .with_config(config.clone()) - .build_with_mock_rpc(); - let platform_version = PlatformVersion::latest(); + assert_eq!(contenders.len(), 2); - let mut rng = StdRng::seed_from_u64(567); + let first_contender = contenders.first().unwrap(); - let mut simple_signer = SimpleSigner::default(); + let second_contender = contenders.last().unwrap(); - let (mut identity1, keys1) = Identity::random_identity_with_main_keys_with_private_key::< - Vec<_>, - >(2, &mut rng, platform_version) - .unwrap(); + assert_eq!(first_contender.identifier, identity2_id.to_vec()); - simple_signer.add_keys(keys1); + assert_eq!(second_contender.identifier, identity1_id.to_vec()); - let (mut identity2, keys2) = Identity::random_identity_with_main_keys_with_private_key::< - Vec<_>, - >(2, &mut rng, platform_version) - .unwrap(); + // All vote counts are 
weighted, so for evonodes, these are in multiples of 4 - simple_signer.add_keys(keys2); + // 19 votes were cast - let start_identities: Vec<(Identity, Option)> = - create_state_transitions_for_identities( - vec![&mut identity1, &mut identity2], - &(dash_to_duffs!(1)..=dash_to_duffs!(1)), - &simple_signer, - &mut rng, - platform_version, - ) - .into_iter() - .map(|(identity, transition)| (identity, Some(transition))) - .collect(); + assert_eq!(first_contender.vote_count, Some(60)); - let dpns_contract = platform - .drive - .cache - .system_data_contracts - .load_dpns() - .as_ref() - .clone(); + assert_eq!(second_contender.vote_count, Some(4)); - let document_type = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type") - .to_owned_document_type(); + assert_eq!(lock_vote_tally, Some(4)); - let identity1_id = start_identities.first().unwrap().0.id(); - let identity2_id = start_identities.last().unwrap().0.id(); - let document_op_1 = DocumentOp { - contract: dpns_contract.clone(), - action: DocumentAction::DocumentActionInsertSpecific( - BTreeMap::from([ - ("label".into(), "quantum".into()), - ("normalizedLabel".into(), "quantum".into()), - ("normalizedParentDomainName".into(), "dash".into()), - ( - "records".into(), - BTreeMap::from([("identity", Value::from(identity1_id))]).into(), - ), - ]), - Some(start_identities.first().unwrap().0.id()), - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: document_type.clone(), - }; + assert_eq!(abstain_vote_tally, Some(8)); - let document_op_2 = DocumentOp { - contract: dpns_contract.clone(), - action: DocumentAction::DocumentActionInsertSpecific( - BTreeMap::from([ - ("label".into(), "quantum".into()), - ("normalizedLabel".into(), "quantum".into()), - ("normalizedParentDomainName".into(), "dash".into()), - ( - "records".into(), - BTreeMap::from([( - "identity", - Value::from(start_identities.last().unwrap().0.id()), - )]) - 
.into(), - ), - ]), - Some(start_identities.last().unwrap().0.id()), - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: document_type.clone(), - }; + assert_eq!( + finished_vote_info, + Some(FinishedVoteInfo { + finished_vote_outcome: FinishedVoteOutcome::TowardsIdentity.into(), + won_by_identity_id: Some(identity2_id.to_vec()), + finished_at_block_height: 17, + finished_at_core_block_height: 1, + finished_at_block_time_ms: 1682303986000, + finished_at_epoch: 1 + }) + ); + + // not let's see how much is in processing pools + + let processing_fees = platform + .drive + .get_epoch_processing_credits_for_distribution( + &Epoch::new(1).unwrap(), + None, + platform_version, + ) + .expect("expected to get processing fees made in epoch"); + + // A vote costs 10_000_000 + // We did 5 votes in this epoch, + // We had 39_810_000_000 left over, which is only the cost of 19 votes + // So we basically have 39_810_000_000 + 50_000_000 + assert_eq!(processing_fees, 39_860_000_000); + }) + .expect("Failed to create thread with custom stack size"); - let strategy = NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![ - Operation { - op_type: OperationType::Document(document_op_1), - frequency: Frequency { - times_per_block_range: 1..2, - chance_per_block: None, - }, + // Wait for the thread to finish and assert that it didn't panic. 
+ handler.join().expect("Thread has panicked"); + } + + #[test] + fn run_chain_with_voting_after_won_by_identity_no_specialized_funds_distribution_until_version_8( + ) { + // Define the desired stack size + let stack_size = 4 * 1024 * 1024; //Let's set the stack size to be higher than the default 2MB + + let builder = std::thread::Builder::new() + .stack_size(stack_size) + .name("custom_stack_size_thread".into()); + + let handler = builder + .spawn(|| { + // In this test the goal is to verify that when we hit version 8 that the specialized balances + // that hadn't been properly distributed are distributed. + let config = PlatformConfig { + validator_set: ValidatorSetConfig { + quorum_size: 10, + ..Default::default() }, - Operation { - op_type: OperationType::Document(document_op_2), - frequency: Frequency { - times_per_block_range: 1..2, - chance_per_block: None, + testing_configs: PlatformTestConfig::default_minimal_verifications(), + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + //we disable document triggers because we are using dpns and dpns needs a preorder + use_document_triggers: false, + + ..Default::default() + }, + block_spacing_ms: 3000, + ..Default::default() + }; + let mut platform = TestPlatformBuilder::new() + .with_config(config.clone()) + .with_initial_protocol_version(7) + .build_with_mock_rpc(); + + let platform_version = PlatformVersion::get(7).unwrap(); + + let mut rng = StdRng::seed_from_u64(567); + + let mut simple_signer = SimpleSigner::default(); + + let (mut identity1, keys1) = + Identity::random_identity_with_main_keys_with_private_key::>( + 2, + &mut rng, + platform_version, + ) + .unwrap(); + + simple_signer.add_keys(keys1); + + let (mut identity2, keys2) = + Identity::random_identity_with_main_keys_with_private_key::>( + 2, + &mut rng, + platform_version, + ) + .unwrap(); + + simple_signer.add_keys(keys2); + + let start_identities: Vec<(Identity, 
Option)> = + create_state_transitions_for_identities( + vec![&mut identity1, &mut identity2], + &(dash_to_duffs!(1)..=dash_to_duffs!(1)), + &simple_signer, + &mut rng, + platform_version, + ) + .into_iter() + .map(|(identity, transition)| (identity, Some(transition))) + .collect(); + + let dpns_contract = platform + .drive + .cache + .system_data_contracts + .load_dpns() + .as_ref() + .clone(); + + let document_type = dpns_contract + .document_type_for_name("domain") + .expect("expected a profile document type") + .to_owned_document_type(); + + let identity1_id = start_identities.first().unwrap().0.id(); + let identity2_id = start_identities.last().unwrap().0.id(); + let document_op_1 = DocumentOp { + contract: dpns_contract.clone(), + action: DocumentAction::DocumentActionInsertSpecific( + BTreeMap::from([ + ("label".into(), "quantum".into()), + ("normalizedLabel".into(), "quantum".into()), + ("normalizedParentDomainName".into(), "dash".into()), + ( + "records".into(), + BTreeMap::from([("identity", Value::from(identity1_id))]).into(), + ), + ]), + Some(identity1_id), + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: document_type.clone(), + }; + + let document_op_2 = DocumentOp { + contract: dpns_contract.clone(), + action: DocumentAction::DocumentActionInsertSpecific( + BTreeMap::from([ + ("label".into(), "quantum".into()), + ("normalizedLabel".into(), "quantum".into()), + ("normalizedParentDomainName".into(), "dash".into()), + ( + "records".into(), + BTreeMap::from([( + "identity", + Value::from(start_identities.last().unwrap().0.id()), + )]) + .into(), + ), + ]), + Some(identity2_id), + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: document_type.clone(), + }; + + let strategy = NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![ + Operation { + op_type: OperationType::Document(document_op_1), + 
frequency: Frequency { + times_per_block_range: 1..2, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_op_2), + frequency: Frequency { + times_per_block_range: 1..2, + chance_per_block: None, + }, + }, + ], + start_identities: StartIdentities { + hard_coded: start_identities, + ..Default::default() }, + identity_inserts: Default::default(), + + identity_contract_nonce_gaps: None, + signer: Some(simple_signer.clone()), }, - ], - start_identities: StartIdentities { - hard_coded: start_identities, + total_hpmns: 20, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: Some(UpgradingInfo { + current_protocol_version: 7, + proposed_protocol_versions_with_weight: vec![(7, 1)], + upgrade_three_quarters_life: 0.2, + }), + + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, ..Default::default() - }, - identity_inserts: Default::default(), + }; + + let mut voting_signer = Some(SimpleSigner::default()); + + // On the first block we only have identities and contracts + let ChainExecutionOutcome { + abci_app, + identities, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions, + end_time_ms, + identity_nonce_counter, + identity_contract_nonce_counter, + state_transition_results_per_block, + .. 
+ } = run_chain_for_strategy( + &mut platform, + 2, + strategy.clone(), + config.clone(), + 15, + &mut voting_signer, + ); + + let platform = abci_app.platform; + + let platform_state = platform.state.load(); + + let state_transitions_block_2 = state_transition_results_per_block + .get(&2) + .expect("expected to get block 2"); + + let first_document_insert_result = &state_transitions_block_2 + .first() + .as_ref() + .expect("expected a document insert") + .1; + assert_eq!(first_document_insert_result.code, 0); + + let second_document_insert_result = &state_transitions_block_2 + .get(1) + .as_ref() + .expect("expected a document insert") + .1; + + assert_eq!(second_document_insert_result.code, 0); // we expect the second to also be insertable as they are both contested + + let block_start = platform_state + .last_committed_block_info() + .as_ref() + .unwrap() + .basic_info() + .height + + 1; + let day_in_ms = 1000 * 60 * 60 * 24; + let config = PlatformConfig { + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + //we disable document triggers because we are using dpns and dpns needs a preorder + use_document_triggers: false, + + ..Default::default() + }, + block_spacing_ms: day_in_ms, + ..Default::default() + }; + + // On the first block we only have identities and contracts + let ChainExecutionOutcome { + abci_app, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + end_time_ms, + identity_nonce_counter, + identity_contract_nonce_counter, + .. 
+ } = continue_chain_for_strategy( + abci_app, + ChainExecutionParameters { + block_start, + core_height_start: 1, + block_count: 16, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions: Some(current_proposer_versions.clone()), + current_identity_nonce_counter: identity_nonce_counter, + current_identity_contract_nonce_counter: identity_contract_nonce_counter, + current_votes: BTreeMap::default(), + start_time_ms: 1681094380000, + current_time_ms: end_time_ms, + current_identities: Vec::new(), + }, + NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![Operation { + op_type: OperationType::ResourceVote(ResourceVoteOp { + resolved_vote_poll: + ContestedDocumentResourceVotePollWithContractInfo { + contract: + DataContractOwnedResolvedInfo::OwnedDataContract( + dpns_contract.clone(), + ), + document_type_name: "domain".to_string(), + index_name: "parentNameAndLabel".to_string(), + index_values: vec!["dash".into(), "quantum".into()], + }, + action: VoteAction { + vote_choices_with_weights: vec![ + (ResourceVoteChoice::Abstain, 1), + (ResourceVoteChoice::Lock, 1), + (ResourceVoteChoice::TowardsIdentity(identity1_id), 2), + (ResourceVoteChoice::TowardsIdentity(identity2_id), 10), + ], + }, + }), + frequency: Frequency { + times_per_block_range: 1..3, + chance_per_block: None, + }, + }], + start_identities: StartIdentities::default(), + identity_inserts: Default::default(), + + identity_contract_nonce_gaps: None, + signer: voting_signer, + }, + total_hpmns: 20, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: Some(UpgradingInfo { + current_protocol_version: 7, + proposed_protocol_versions_with_weight: vec![(7, 1)], + upgrade_three_quarters_life: 0.2, + }), - identity_contract_nonce_gaps: None, - signer: Some(simple_signer), - }, - total_hpmns: 100, - extra_normal_mns: 0, - validator_quorum_count: 24, - 
chain_lock_quorum_count: 24, - upgrading_info: None, + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: true, + ..Default::default() + }, + config.clone(), + StrategyRandomness::SeedEntropy(9), + ); + + let platform = abci_app.platform; + + // Now let's run a query for the vote totals + + let bincode_config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let dash_encoded = + bincode::encode_to_vec(Value::Text("dash".to_string()), bincode_config) + .expect("expected to encode the word dash"); + + let quantum_encoded = + bincode::encode_to_vec(Value::Text("quantum".to_string()), bincode_config) + .expect("expected to encode the word quantum"); + + let index_name = "parentNameAndLabel".to_string(); + + let query_validation_result = platform + .query_contested_resource_vote_state( + GetContestedResourceVoteStateRequest { + version: Some(get_contested_resource_vote_state_request::Version::V0( + GetContestedResourceVoteStateRequestV0 { + contract_id: dpns_contract.id().to_vec(), + document_type_name: document_type.name().clone(), + index_name: index_name.clone(), + index_values: vec![ + dash_encoded.clone(), + quantum_encoded.clone(), + ], + result_type: ResultType::DocumentsAndVoteTally as i32, + allow_include_locked_and_abstaining_vote_tally: true, + start_at_identifier_info: None, + count: None, + prove: false, + }, + )), + }, + &platform_state, + platform_version, + ) + .expect("expected to execute query") + .into_data() + .expect("expected query to be valid"); + + let get_contested_resource_vote_state_response::Version::V0( + GetContestedResourceVoteStateResponseV0 { + metadata: _, + result, + }, + ) = query_validation_result.version.expect("expected a version"); - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() + let 
Some( + get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( + get_contested_resource_vote_state_response_v0::ContestedResourceContenders { + contenders, + abstain_vote_tally, + lock_vote_tally, + finished_vote_info, + }, + ), + ) = result + else { + panic!("expected contenders") }; - let mut voting_signer = Some(SimpleSigner::default()); + assert_eq!(contenders.len(), 2); - // On the first block we only have identities and contracts - let ChainExecutionOutcome { - abci_app, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions, - end_time_ms, - identity_nonce_counter, - identity_contract_nonce_counter, - state_transition_results_per_block, - .. - } = run_chain_for_strategy( - &mut platform, - 2, - strategy.clone(), - config.clone(), - 15, - &mut voting_signer, - ); + let first_contender = contenders.first().unwrap(); - let platform = abci_app.platform; + let second_contender = contenders.last().unwrap(); - let platform_state = platform.state.load(); + assert_eq!(first_contender.identifier, identity2_id.to_vec()); - let state_transitions_block_2 = state_transition_results_per_block - .get(&2) - .expect("expected to get block 2"); + assert_eq!(second_contender.identifier, identity1_id.to_vec()); - let first_document_insert_result = &state_transitions_block_2 - .first() - .as_ref() - .expect("expected a document insert") - .1; - assert_eq!(first_document_insert_result.code, 0); + // All vote counts are weighted, so for evonodes, these are in multiples of 4 - let second_document_insert_result = &state_transitions_block_2 - .get(1) - .as_ref() - .expect("expected a document insert") - .1; + assert_eq!( + ( + first_contender.vote_count, + second_contender.vote_count, + lock_vote_tally, + abstain_vote_tally + ), + (Some(64), Some(8), Some(0), Some(0)) + ); - assert_eq!(second_document_insert_result.code, 0); // we expect the second to also be insertable as they are both 
contested + assert_eq!( + finished_vote_info, + Some(FinishedVoteInfo { + finished_vote_outcome: FinishedVoteOutcome::TowardsIdentity.into(), + won_by_identity_id: Some(identity2_id.to_vec()), + finished_at_block_height: 17, + finished_at_core_block_height: 1, + finished_at_block_time_ms: 1682303986000, + finished_at_epoch: 1 + }) + ); + + // not let's see how much is in processing pools + + let processing_fees = platform + .drive + .get_epoch_processing_credits_for_distribution( + &Epoch::new(1).unwrap(), + None, + platform_version, + ) + .expect("expected to get processing fees made in epoch"); + + // A vote costs 10_000_000 + // Hence we did 4 votes in this epoch + assert_eq!(processing_fees, 40_000_000); + + // Now let's upgrade to version 8 + + let platform = abci_app.platform; + + let platform_state = platform.state.load(); + + let block_start = platform_state + .last_committed_block_info() + .as_ref() + .unwrap() + .basic_info() + .height + + 1; + + let ten_hours_in_ms = 1000 * 60 * 60 * 10; + let config = PlatformConfig { + chain_lock: ChainLockConfig::default_100_67(), + instant_lock: InstantLockConfig::default_100_67(), + execution: ExecutionConfig { + //we disable document triggers because we are using dpns and dpns needs a preorder + use_document_triggers: false, + + ..Default::default() + }, + block_spacing_ms: ten_hours_in_ms, + ..Default::default() + }; + + // We go 45 blocks later + let ChainExecutionOutcome { + abci_app, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + end_time_ms, + identity_nonce_counter, + identity_contract_nonce_counter, + .. 
+ } = continue_chain_for_strategy( + abci_app, + ChainExecutionParameters { + block_start, + core_height_start: 1, + block_count: 45, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions: None, + current_identity_nonce_counter: identity_nonce_counter, + current_identity_contract_nonce_counter: identity_contract_nonce_counter, + current_votes: BTreeMap::default(), + start_time_ms: 1681094380000, + current_time_ms: end_time_ms, + current_identities: Vec::new(), + }, + NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![], + start_identities: StartIdentities::default(), + identity_inserts: Default::default(), + + identity_contract_nonce_gaps: None, + signer: None, + }, + total_hpmns: 20, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: Some(UpgradingInfo { + current_protocol_version: 8, + proposed_protocol_versions_with_weight: vec![(8, 1)], + upgrade_three_quarters_life: 0.1, + }), - let block_start = platform_state - .last_committed_block_info() - .as_ref() - .unwrap() - .basic_info() - .height - + 1; - let day_in_ms = 1000 * 60 * 60 * 24; - let config = PlatformConfig { - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - //we disable document triggers because we are using dpns and dpns needs a preorder - use_document_triggers: false, + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: false, + ..Default::default() + }, + config.clone(), + StrategyRandomness::SeedEntropy(9203), + ); + + let platform = abci_app.platform; + + let platform_state = platform.state.load(); + + let mut block_start = platform_state + .last_committed_block_info() + .as_ref() + .unwrap() + .basic_info() + .height + + 1; + + // We need to create a few more contests 
+ + let document_op_1 = DocumentOp { + contract: dpns_contract.clone(), + action: DocumentAction::DocumentActionInsertSpecific( + BTreeMap::from([ + ("label".into(), "sam".into()), + ("normalizedLabel".into(), "sam".into()), + ("normalizedParentDomainName".into(), "dash".into()), + ("parentDomainName".into(), "dash".into()), + ( + "records".into(), + BTreeMap::from([("identity", Value::from(identity1_id))]).into(), + ), + ]), + Some(identity1_id), + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: document_type.clone(), + }; + + let document_op_2 = DocumentOp { + contract: dpns_contract.clone(), + action: DocumentAction::DocumentActionInsertSpecific( + BTreeMap::from([ + ("label".into(), "sam".into()), + ("normalizedLabel".into(), "sam".into()), + ("normalizedParentDomainName".into(), "dash".into()), + ("parentDomainName".into(), "dash".into()), + ( + "records".into(), + BTreeMap::from([("identity", Value::from(identity2_id))]).into(), + ), + ]), + Some(identity2_id), + DocumentFieldFillType::FillIfNotRequired, + DocumentFieldFillSize::AnyDocumentFillSize, + ), + document_type: document_type.clone(), + }; + + let ChainExecutionOutcome { + abci_app, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + end_time_ms, + identity_nonce_counter, + identity_contract_nonce_counter, + .. 
+ } = continue_chain_for_strategy( + abci_app, + ChainExecutionParameters { + block_start, + core_height_start: 1, + block_count: 1, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions: None, + current_identity_nonce_counter: identity_nonce_counter, + current_identity_contract_nonce_counter: identity_contract_nonce_counter, + current_votes: BTreeMap::default(), + start_time_ms: 1681094380000, + current_time_ms: end_time_ms, + current_identities: identities, + }, + NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![ + Operation { + op_type: OperationType::Document(document_op_1), + frequency: Frequency { + times_per_block_range: 1..2, + chance_per_block: None, + }, + }, + Operation { + op_type: OperationType::Document(document_op_2), + frequency: Frequency { + times_per_block_range: 1..2, + chance_per_block: None, + }, + }, + ], + start_identities: StartIdentities::default(), + identity_inserts: Default::default(), + + identity_contract_nonce_gaps: None, + signer: Some(simple_signer), + }, + total_hpmns: 20, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: Some(UpgradingInfo { + current_protocol_version: 8, + proposed_protocol_versions_with_weight: vec![(8, 1)], + upgrade_three_quarters_life: 0.1, + }), - ..Default::default() - }, - block_spacing_ms: day_in_ms, - ..Default::default() - }; - - let outcome = continue_chain_for_strategy( - abci_app, - ChainExecutionParameters { - block_start, - core_height_start: 1, - block_count: 16, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions: Some(current_proposer_versions.clone()), - current_identity_nonce_counter: identity_nonce_counter, - current_identity_contract_nonce_counter: identity_contract_nonce_counter, - current_votes: BTreeMap::default(), - start_time_ms: 1681094380000, - current_time_ms: 
end_time_ms, - current_identities: Vec::new(), - }, - NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![Operation { - op_type: OperationType::ResourceVote(ResourceVoteOp { - resolved_vote_poll: ContestedDocumentResourceVotePollWithContractInfo { - contract: DataContractOwnedResolvedInfo::OwnedDataContract( - dpns_contract.clone(), - ), - document_type_name: "domain".to_string(), - index_name: "parentNameAndLabel".to_string(), - index_values: vec!["dash".into(), "quantum".into()], - }, - action: VoteAction { - vote_choices_with_weights: vec![ - (ResourceVoteChoice::Abstain, 1), - (ResourceVoteChoice::Lock, 1), - (ResourceVoteChoice::TowardsIdentity(identity1_id), 2), - (ResourceVoteChoice::TowardsIdentity(identity2_id), 10), - ], - }, - }), - frequency: Frequency { - times_per_block_range: 1..3, - chance_per_block: None, - }, - }], - start_identities: StartIdentities::default(), - identity_inserts: Default::default(), - - identity_contract_nonce_gaps: None, - signer: voting_signer, - }, - total_hpmns: 100, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: None, - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() - }, - config.clone(), - StrategyRandomness::SeedEntropy(9), - ); - - let platform = outcome.abci_app.platform; - - // Now let's run a query for the vote totals - - let config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); - - let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), config) - .expect("expected to encode the word dash"); - - let quantum_encoded = bincode::encode_to_vec(Value::Text("quantum".to_string()), config) - .expect("expected to encode the word quantum"); - - let index_name = "parentNameAndLabel".to_string(); - - let query_validation_result = platform - 
.query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: document_type.name().clone(), - index_name: index_name.clone(), - index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: true, - start_at_identifier_info: None, - count: None, - prove: false, + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: false, + ..Default::default() + }, + config.clone(), + StrategyRandomness::SeedEntropy(9203), + ); + + block_start += 1; + + // We go 14 blocks later till version 8 is active + let outcome = continue_chain_for_strategy( + abci_app, + ChainExecutionParameters { + block_start, + core_height_start: 1, + block_count: 14, + proposers, + validator_quorums, + current_validator_quorum_hash, + instant_lock_quorums, + current_proposer_versions: None, + current_identity_nonce_counter: identity_nonce_counter, + current_identity_contract_nonce_counter: identity_contract_nonce_counter, + current_votes: BTreeMap::default(), + start_time_ms: 1681094380000, + current_time_ms: end_time_ms, + current_identities: Vec::new(), + }, + NetworkStrategy { + strategy: Strategy { + start_contracts: vec![], + operations: vec![], + start_identities: StartIdentities::default(), + identity_inserts: Default::default(), + + identity_contract_nonce_gaps: None, + signer: None, }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = 
query_validation_result.version.expect("expected a version"); - - let Some( - get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( - get_contested_resource_vote_state_response_v0::ContestedResourceContenders { - contenders, - abstain_vote_tally, - lock_vote_tally, - finished_vote_info, - }, - ), - ) = result - else { - panic!("expected contenders") - }; - - assert_eq!(contenders.len(), 2); - - let first_contender = contenders.first().unwrap(); - - let second_contender = contenders.last().unwrap(); - - assert_eq!(first_contender.identifier, identity2_id.to_vec()); - - assert_eq!(second_contender.identifier, identity1_id.to_vec()); - - // All vote counts are weighted, so for evonodes, these are in multiples of 4 - - // 19 votes were cast - - assert_eq!(first_contender.vote_count, Some(60)); - - assert_eq!(second_contender.vote_count, Some(4)); - - assert_eq!(lock_vote_tally, Some(4)); - - assert_eq!(abstain_vote_tally, Some(8)); + total_hpmns: 20, + extra_normal_mns: 0, + validator_quorum_count: 24, + chain_lock_quorum_count: 24, + upgrading_info: Some(UpgradingInfo { + current_protocol_version: 8, + proposed_protocol_versions_with_weight: vec![(8, 1)], + upgrade_three_quarters_life: 0.1, + }), - assert_eq!( - finished_vote_info, - Some(FinishedVoteInfo { - finished_vote_outcome: FinishedVoteOutcome::TowardsIdentity.into(), - won_by_identity_id: Some(identity2_id.to_vec()), - finished_at_block_height: 17, - finished_at_core_block_height: 1, - finished_at_block_time_ms: 1682303986000, - finished_at_epoch: 1 + proposer_strategy: Default::default(), + rotate_quorums: false, + failure_testing: None, + query_testing: None, + verify_state_transition_results: false, + ..Default::default() + }, + config.clone(), + StrategyRandomness::SeedEntropy(9203), + ); + + let platform = outcome.abci_app.platform; + platform + .drive + .fetch_versions_with_counter(None, &platform_version.drive) + .expect("expected to get versions"); + + let state = 
platform.state.load(); + assert_eq!( + state + .last_committed_block_info() + .as_ref() + .unwrap() + .basic_info() + .epoch + .index, + 4 + ); + assert_eq!(state.current_protocol_version_in_consensus(), 8); + + let processing_fees = platform + .drive + .get_epoch_processing_credits_for_distribution( + &Epoch::new(4).unwrap(), + None, + platform_version, + ) + .expect("expected to get processing fees made in epoch"); + + // A vote costs 10_000_000 + // There were 23 votes total so that means that there would have been 39_780_000_000 left over + // We see that there is 39_780_000_000 to distribute + assert_eq!(processing_fees, 39_780_000_000); }) - ); - - // not let's see how much is in processing pools - - let processing_fees = platform - .drive - .get_epoch_processing_credits_for_distribution( - &Epoch::new(1).unwrap(), - None, - platform_version, - ) - .expect("expected to get processing fees made in epoch"); - - // A vote costs 10_000_000 - // We did 5 votes in this epoch, - // We had 39_810_000_000 left over, which is only the cost of 19 votes - // So we basically have 39_810_000_000 + 50_000_000 - assert_eq!(processing_fees, 39_860_000_000); - } - - #[test] - fn run_chain_with_voting_after_won_by_identity_no_specialized_funds_distribution_until_version_8( - ) { - // In this test the goal is to verify that when we hit version 8 that the specialized balances - // that hadn't been properly distributed are distributed. 
- let config = PlatformConfig { - validator_set: ValidatorSetConfig { - quorum_size: 10, - ..Default::default() - }, - testing_configs: PlatformTestConfig::default_minimal_verifications(), - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - //we disable document triggers because we are using dpns and dpns needs a preorder - use_document_triggers: false, - - ..Default::default() - }, - block_spacing_ms: 3000, - ..Default::default() - }; - let mut platform = TestPlatformBuilder::new() - .with_config(config.clone()) - .with_initial_protocol_version(7) - .build_with_mock_rpc(); - - let platform_version = PlatformVersion::get(7).unwrap(); - - let mut rng = StdRng::seed_from_u64(567); - - let mut simple_signer = SimpleSigner::default(); - - let (mut identity1, keys1) = Identity::random_identity_with_main_keys_with_private_key::< - Vec<_>, - >(2, &mut rng, platform_version) - .unwrap(); - - simple_signer.add_keys(keys1); - - let (mut identity2, keys2) = Identity::random_identity_with_main_keys_with_private_key::< - Vec<_>, - >(2, &mut rng, platform_version) - .unwrap(); - - simple_signer.add_keys(keys2); - - let start_identities: Vec<(Identity, Option)> = - create_state_transitions_for_identities( - vec![&mut identity1, &mut identity2], - &(dash_to_duffs!(1)..=dash_to_duffs!(1)), - &simple_signer, - &mut rng, - platform_version, - ) - .into_iter() - .map(|(identity, transition)| (identity, Some(transition))) - .collect(); - - let dpns_contract = platform - .drive - .cache - .system_data_contracts - .load_dpns() - .as_ref() - .clone(); - - let document_type = dpns_contract - .document_type_for_name("domain") - .expect("expected a profile document type") - .to_owned_document_type(); + .expect("Failed to create thread with custom stack size"); - let identity1_id = start_identities.first().unwrap().0.id(); - let identity2_id = start_identities.last().unwrap().0.id(); - let document_op_1 = 
DocumentOp { - contract: dpns_contract.clone(), - action: DocumentAction::DocumentActionInsertSpecific( - BTreeMap::from([ - ("label".into(), "quantum".into()), - ("normalizedLabel".into(), "quantum".into()), - ("normalizedParentDomainName".into(), "dash".into()), - ( - "records".into(), - BTreeMap::from([("identity", Value::from(identity1_id))]).into(), - ), - ]), - Some(identity1_id), - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: document_type.clone(), - }; - - let document_op_2 = DocumentOp { - contract: dpns_contract.clone(), - action: DocumentAction::DocumentActionInsertSpecific( - BTreeMap::from([ - ("label".into(), "quantum".into()), - ("normalizedLabel".into(), "quantum".into()), - ("normalizedParentDomainName".into(), "dash".into()), - ( - "records".into(), - BTreeMap::from([( - "identity", - Value::from(start_identities.last().unwrap().0.id()), - )]) - .into(), - ), - ]), - Some(identity2_id), - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: document_type.clone(), - }; - - let strategy = NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![ - Operation { - op_type: OperationType::Document(document_op_1), - frequency: Frequency { - times_per_block_range: 1..2, - chance_per_block: None, - }, - }, - Operation { - op_type: OperationType::Document(document_op_2), - frequency: Frequency { - times_per_block_range: 1..2, - chance_per_block: None, - }, - }, - ], - start_identities: StartIdentities { - hard_coded: start_identities, - ..Default::default() - }, - identity_inserts: Default::default(), - - identity_contract_nonce_gaps: None, - signer: Some(simple_signer.clone()), - }, - total_hpmns: 20, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: Some(UpgradingInfo { - current_protocol_version: 7, - proposed_protocol_versions_with_weight: vec![(7, 1)], 
- upgrade_three_quarters_life: 0.2, - }), - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() - }; - - let mut voting_signer = Some(SimpleSigner::default()); - - // On the first block we only have identities and contracts - let ChainExecutionOutcome { - abci_app, - identities, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions, - end_time_ms, - identity_nonce_counter, - identity_contract_nonce_counter, - state_transition_results_per_block, - .. - } = run_chain_for_strategy( - &mut platform, - 2, - strategy.clone(), - config.clone(), - 15, - &mut voting_signer, - ); - - let platform = abci_app.platform; - - let platform_state = platform.state.load(); - - let state_transitions_block_2 = state_transition_results_per_block - .get(&2) - .expect("expected to get block 2"); - - let first_document_insert_result = &state_transitions_block_2 - .first() - .as_ref() - .expect("expected a document insert") - .1; - assert_eq!(first_document_insert_result.code, 0); - - let second_document_insert_result = &state_transitions_block_2 - .get(1) - .as_ref() - .expect("expected a document insert") - .1; - - assert_eq!(second_document_insert_result.code, 0); // we expect the second to also be insertable as they are both contested - - let block_start = platform_state - .last_committed_block_info() - .as_ref() - .unwrap() - .basic_info() - .height - + 1; - let day_in_ms = 1000 * 60 * 60 * 24; - let config = PlatformConfig { - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - //we disable document triggers because we are using dpns and dpns needs a preorder - use_document_triggers: false, - - ..Default::default() - }, - block_spacing_ms: day_in_ms, - ..Default::default() - }; - - // On the first block we only have 
identities and contracts - let ChainExecutionOutcome { - abci_app, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - end_time_ms, - identity_nonce_counter, - identity_contract_nonce_counter, - .. - } = continue_chain_for_strategy( - abci_app, - ChainExecutionParameters { - block_start, - core_height_start: 1, - block_count: 16, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions: Some(current_proposer_versions.clone()), - current_identity_nonce_counter: identity_nonce_counter, - current_identity_contract_nonce_counter: identity_contract_nonce_counter, - current_votes: BTreeMap::default(), - start_time_ms: 1681094380000, - current_time_ms: end_time_ms, - current_identities: Vec::new(), - }, - NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![Operation { - op_type: OperationType::ResourceVote(ResourceVoteOp { - resolved_vote_poll: ContestedDocumentResourceVotePollWithContractInfo { - contract: DataContractOwnedResolvedInfo::OwnedDataContract( - dpns_contract.clone(), - ), - document_type_name: "domain".to_string(), - index_name: "parentNameAndLabel".to_string(), - index_values: vec!["dash".into(), "quantum".into()], - }, - action: VoteAction { - vote_choices_with_weights: vec![ - (ResourceVoteChoice::Abstain, 1), - (ResourceVoteChoice::Lock, 1), - (ResourceVoteChoice::TowardsIdentity(identity1_id), 2), - (ResourceVoteChoice::TowardsIdentity(identity2_id), 10), - ], - }, - }), - frequency: Frequency { - times_per_block_range: 1..3, - chance_per_block: None, - }, - }], - start_identities: StartIdentities::default(), - identity_inserts: Default::default(), - - identity_contract_nonce_gaps: None, - signer: voting_signer, - }, - total_hpmns: 20, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: Some(UpgradingInfo { - current_protocol_version: 7, - 
proposed_protocol_versions_with_weight: vec![(7, 1)], - upgrade_three_quarters_life: 0.2, - }), - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: true, - ..Default::default() - }, - config.clone(), - StrategyRandomness::SeedEntropy(9), - ); - - let platform = abci_app.platform; - - // Now let's run a query for the vote totals - - let bincode_config = bincode::config::standard() - .with_big_endian() - .with_no_limit(); - - let dash_encoded = bincode::encode_to_vec(Value::Text("dash".to_string()), bincode_config) - .expect("expected to encode the word dash"); - - let quantum_encoded = - bincode::encode_to_vec(Value::Text("quantum".to_string()), bincode_config) - .expect("expected to encode the word quantum"); - - let index_name = "parentNameAndLabel".to_string(); - - let query_validation_result = platform - .query_contested_resource_vote_state( - GetContestedResourceVoteStateRequest { - version: Some(get_contested_resource_vote_state_request::Version::V0( - GetContestedResourceVoteStateRequestV0 { - contract_id: dpns_contract.id().to_vec(), - document_type_name: document_type.name().clone(), - index_name: index_name.clone(), - index_values: vec![dash_encoded.clone(), quantum_encoded.clone()], - result_type: ResultType::DocumentsAndVoteTally as i32, - allow_include_locked_and_abstaining_vote_tally: true, - start_at_identifier_info: None, - count: None, - prove: false, - }, - )), - }, - &platform_state, - platform_version, - ) - .expect("expected to execute query") - .into_data() - .expect("expected query to be valid"); - - let get_contested_resource_vote_state_response::Version::V0( - GetContestedResourceVoteStateResponseV0 { - metadata: _, - result, - }, - ) = query_validation_result.version.expect("expected a version"); - - let Some( - get_contested_resource_vote_state_response_v0::Result::ContestedResourceContenders( - 
get_contested_resource_vote_state_response_v0::ContestedResourceContenders { - contenders, - abstain_vote_tally, - lock_vote_tally, - finished_vote_info, - }, - ), - ) = result - else { - panic!("expected contenders") - }; - - assert_eq!(contenders.len(), 2); - - let first_contender = contenders.first().unwrap(); - - let second_contender = contenders.last().unwrap(); - - assert_eq!(first_contender.identifier, identity2_id.to_vec()); - - assert_eq!(second_contender.identifier, identity1_id.to_vec()); - - // All vote counts are weighted, so for evonodes, these are in multiples of 4 - - assert_eq!( - ( - first_contender.vote_count, - second_contender.vote_count, - lock_vote_tally, - abstain_vote_tally - ), - (Some(64), Some(8), Some(0), Some(0)) - ); - - assert_eq!( - finished_vote_info, - Some(FinishedVoteInfo { - finished_vote_outcome: FinishedVoteOutcome::TowardsIdentity.into(), - won_by_identity_id: Some(identity2_id.to_vec()), - finished_at_block_height: 17, - finished_at_core_block_height: 1, - finished_at_block_time_ms: 1682303986000, - finished_at_epoch: 1 - }) - ); - - // not let's see how much is in processing pools - - let processing_fees = platform - .drive - .get_epoch_processing_credits_for_distribution( - &Epoch::new(1).unwrap(), - None, - platform_version, - ) - .expect("expected to get processing fees made in epoch"); - - // A vote costs 10_000_000 - // Hence we did 4 votes in this epoch - assert_eq!(processing_fees, 40_000_000); - - // Now let's upgrade to version 8 - - let platform = abci_app.platform; - - let platform_state = platform.state.load(); - - let block_start = platform_state - .last_committed_block_info() - .as_ref() - .unwrap() - .basic_info() - .height - + 1; - - let ten_hours_in_ms = 1000 * 60 * 60 * 10; - let config = PlatformConfig { - chain_lock: ChainLockConfig::default_100_67(), - instant_lock: InstantLockConfig::default_100_67(), - execution: ExecutionConfig { - //we disable document triggers because we are using dpns and dpns 
needs a preorder - use_document_triggers: false, - - ..Default::default() - }, - block_spacing_ms: ten_hours_in_ms, - ..Default::default() - }; - - // We go 45 blocks later - let ChainExecutionOutcome { - abci_app, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - end_time_ms, - identity_nonce_counter, - identity_contract_nonce_counter, - .. - } = continue_chain_for_strategy( - abci_app, - ChainExecutionParameters { - block_start, - core_height_start: 1, - block_count: 45, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions: None, - current_identity_nonce_counter: identity_nonce_counter, - current_identity_contract_nonce_counter: identity_contract_nonce_counter, - current_votes: BTreeMap::default(), - start_time_ms: 1681094380000, - current_time_ms: end_time_ms, - current_identities: Vec::new(), - }, - NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![], - start_identities: StartIdentities::default(), - identity_inserts: Default::default(), - - identity_contract_nonce_gaps: None, - signer: None, - }, - total_hpmns: 20, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: Some(UpgradingInfo { - current_protocol_version: 8, - proposed_protocol_versions_with_weight: vec![(8, 1)], - upgrade_three_quarters_life: 0.1, - }), - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: false, - ..Default::default() - }, - config.clone(), - StrategyRandomness::SeedEntropy(9203), - ); - - let platform = abci_app.platform; - - let platform_state = platform.state.load(); - - let mut block_start = platform_state - .last_committed_block_info() - .as_ref() - .unwrap() - .basic_info() - .height - + 1; - - // We need to create a few more contests - - let document_op_1 = DocumentOp { - contract: 
dpns_contract.clone(), - action: DocumentAction::DocumentActionInsertSpecific( - BTreeMap::from([ - ("label".into(), "sam".into()), - ("normalizedLabel".into(), "sam".into()), - ("normalizedParentDomainName".into(), "dash".into()), - ("parentDomainName".into(), "dash".into()), - ( - "records".into(), - BTreeMap::from([("identity", Value::from(identity1_id))]).into(), - ), - ]), - Some(identity1_id), - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: document_type.clone(), - }; - - let document_op_2 = DocumentOp { - contract: dpns_contract.clone(), - action: DocumentAction::DocumentActionInsertSpecific( - BTreeMap::from([ - ("label".into(), "sam".into()), - ("normalizedLabel".into(), "sam".into()), - ("normalizedParentDomainName".into(), "dash".into()), - ("parentDomainName".into(), "dash".into()), - ( - "records".into(), - BTreeMap::from([("identity", Value::from(identity2_id))]).into(), - ), - ]), - Some(identity2_id), - DocumentFieldFillType::FillIfNotRequired, - DocumentFieldFillSize::AnyDocumentFillSize, - ), - document_type: document_type.clone(), - }; - - let ChainExecutionOutcome { - abci_app, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - end_time_ms, - identity_nonce_counter, - identity_contract_nonce_counter, - .. 
- } = continue_chain_for_strategy( - abci_app, - ChainExecutionParameters { - block_start, - core_height_start: 1, - block_count: 1, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions: None, - current_identity_nonce_counter: identity_nonce_counter, - current_identity_contract_nonce_counter: identity_contract_nonce_counter, - current_votes: BTreeMap::default(), - start_time_ms: 1681094380000, - current_time_ms: end_time_ms, - current_identities: identities, - }, - NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![ - Operation { - op_type: OperationType::Document(document_op_1), - frequency: Frequency { - times_per_block_range: 1..2, - chance_per_block: None, - }, - }, - Operation { - op_type: OperationType::Document(document_op_2), - frequency: Frequency { - times_per_block_range: 1..2, - chance_per_block: None, - }, - }, - ], - start_identities: StartIdentities::default(), - identity_inserts: Default::default(), - - identity_contract_nonce_gaps: None, - signer: Some(simple_signer), - }, - total_hpmns: 20, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: Some(UpgradingInfo { - current_protocol_version: 8, - proposed_protocol_versions_with_weight: vec![(8, 1)], - upgrade_three_quarters_life: 0.1, - }), - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: false, - ..Default::default() - }, - config.clone(), - StrategyRandomness::SeedEntropy(9203), - ); - - block_start += 1; - - // We go 14 blocks later till version 8 is active - let outcome = continue_chain_for_strategy( - abci_app, - ChainExecutionParameters { - block_start, - core_height_start: 1, - block_count: 14, - proposers, - validator_quorums, - current_validator_quorum_hash, - instant_lock_quorums, - current_proposer_versions: None, - 
current_identity_nonce_counter: identity_nonce_counter, - current_identity_contract_nonce_counter: identity_contract_nonce_counter, - current_votes: BTreeMap::default(), - start_time_ms: 1681094380000, - current_time_ms: end_time_ms, - current_identities: Vec::new(), - }, - NetworkStrategy { - strategy: Strategy { - start_contracts: vec![], - operations: vec![], - start_identities: StartIdentities::default(), - identity_inserts: Default::default(), - - identity_contract_nonce_gaps: None, - signer: None, - }, - total_hpmns: 20, - extra_normal_mns: 0, - validator_quorum_count: 24, - chain_lock_quorum_count: 24, - upgrading_info: Some(UpgradingInfo { - current_protocol_version: 8, - proposed_protocol_versions_with_weight: vec![(8, 1)], - upgrade_three_quarters_life: 0.1, - }), - - proposer_strategy: Default::default(), - rotate_quorums: false, - failure_testing: None, - query_testing: None, - verify_state_transition_results: false, - ..Default::default() - }, - config.clone(), - StrategyRandomness::SeedEntropy(9203), - ); - - let platform = outcome.abci_app.platform; - platform - .drive - .fetch_versions_with_counter(None, &platform_version.drive) - .expect("expected to get versions"); - - let state = platform.state.load(); - assert_eq!( - state - .last_committed_block_info() - .as_ref() - .unwrap() - .basic_info() - .epoch - .index, - 4 - ); - assert_eq!(state.current_protocol_version_in_consensus(), 8); - - let processing_fees = platform - .drive - .get_epoch_processing_credits_for_distribution( - &Epoch::new(4).unwrap(), - None, - platform_version, - ) - .expect("expected to get processing fees made in epoch"); - - // A vote costs 10_000_000 - // There were 23 votes total so that means that there would have been 39_780_000_000 left over - // We see that there is 39_780_000_000 to distribute - assert_eq!(processing_fees, 39_780_000_000); + // Wait for the thread to finish and assert that it didn't panic. 
+ handler.join().expect("Thread has panicked"); } } diff --git a/packages/rs-drive-abci/tests/supporting_files/contract/basic-token/basic-token.json b/packages/rs-drive-abci/tests/supporting_files/contract/basic-token/basic-token.json index ec2a3fd928e..bbd55bbc8ce 100644 --- a/packages/rs-drive-abci/tests/supporting_files/contract/basic-token/basic-token.json +++ b/packages/rs-drive-abci/tests/supporting_files/contract/basic-token/basic-token.json @@ -12,8 +12,7 @@ "decimals": 8 }, "baseSupply": 100000, - "maxSupply": null, - "keepsHistory": true + "maxSupply": null } } } \ No newline at end of file diff --git a/packages/rs-drive-proof-verifier/Cargo.toml b/packages/rs-drive-proof-verifier/Cargo.toml index f27d58db77c..024e280c130 100644 --- a/packages/rs-drive-proof-verifier/Cargo.toml +++ b/packages/rs-drive-proof-verifier/Cargo.toml @@ -12,13 +12,14 @@ mocks = [ "dep:platform-serialization-derive", "dpp/document-serde-conversion", "indexmap/serde", + "dpp/data-contract-serde-conversion" ] [dependencies] thiserror = { version = "1.0.63" } dapi-grpc = { path = "../dapi-grpc", default-features = false, features = [ - "platform", + "platform", "client" ] } drive = { path = "../rs-drive", default-features = false, features = [ diff --git a/packages/rs-drive-proof-verifier/src/proof.rs b/packages/rs-drive-proof-verifier/src/proof.rs index 6399a3e691d..e7a43d35777 100644 --- a/packages/rs-drive-proof-verifier/src/proof.rs +++ b/packages/rs-drive-proof-verifier/src/proof.rs @@ -1,7 +1,7 @@ use crate::from_request::TryFromRequest; use crate::provider::DataContractProvider; use crate::verify::verify_tenderdash_proof; -use crate::{types, types::*, ContextProvider, Error}; +use crate::{types::*, ContextProvider, Error}; use dapi_grpc::platform::v0::get_evonodes_proposed_epoch_blocks_by_range_request::get_evonodes_proposed_epoch_blocks_by_range_request_v0::Start; use dapi_grpc::platform::v0::get_identities_contract_keys_request::GetIdentitiesContractKeysRequestV0; use 
dapi_grpc::platform::v0::get_path_elements_request::GetPathElementsRequestV0; @@ -51,7 +51,7 @@ use std::num::TryFromIntError; /// Parse and verify the received proof and retrieve the requested object, if any. /// /// Use [`FromProof::maybe_from_proof()`] or [`FromProof::from_proof()`] to parse and verify proofs received -/// from the Dash Platform (including verification of grovedb-generated proofs and cryptographic proofs geneerated +/// from the Dash Platform (including verification of grovedb-generated proofs and cryptographic proofs generated /// by Tenderdash). /// /// gRPC responses, received from the Dash Platform in response to requests containing `prove: true`, contain @@ -60,7 +60,7 @@ use std::num::TryFromIntError; /// object (or information that the object does not exist) in one step. /// /// This trait is implemented by several objects defined in [Dash Platform Protocol](dpp), like [Identity], -/// [DataContract], [Documents], etc. It is also implemented by several helper objects from [crate::types] module. +/// [DataContract], [Documents], etc. It is also implemented by several helper objects from [types] module. pub trait FromProof { /// Request type for which this trait is implemented. type Request; @@ -487,7 +487,7 @@ fn parse_key_request_type(request: &Option) -> ResultErr(e), - Ok(d) => Ok(((*k as u8),d)), + Ok(d) => Ok((*k as u8,d)), } }) .collect::>,Error>>()?; @@ -521,15 +521,14 @@ impl FromProof for IdentityNonceFetcher { let mtd = response.metadata().or(Err(Error::EmptyResponseMetadata))?; - let identity_id = match request.version.ok_or(Error::EmptyVersion)? { - get_identity_nonce_request::Version::V0(v0) => { - Ok::( + let identity_id = + match request.version.ok_or(Error::EmptyVersion)? 
{ + get_identity_nonce_request::Version::V0(v0) => Ok::( Identifier::from_bytes(&v0.identity_id).map_err(|e| Error::ProtocolError { error: e.to_string(), })?, - ) - } - }?; + ), + }?; // Extract content from proof and verify Drive/GroveDB proofs let (root_hash, maybe_nonce) = Drive::verify_identity_nonce( @@ -551,7 +550,7 @@ impl FromProof for IdentityNonceFetcher { verify_tenderdash_proof(proof, mtd, &root_hash, provider)?; Ok(( - maybe_nonce.map(types::IdentityNonceFetcher), + maybe_nonce.map(IdentityNonceFetcher), mtd.clone(), proof.clone(), )) @@ -582,7 +581,7 @@ impl FromProof for IdentityContractNo let (identity_id, contract_id) = match request.version.ok_or(Error::EmptyVersion)? { get_identity_contract_nonce_request::Version::V0(v0) => { - Ok::<(dpp::identifier::Identifier, dpp::identifier::Identifier), Error>(( + Ok::<(Identifier, Identifier), Error>(( Identifier::from_bytes(&v0.identity_id).map_err(|e| Error::ProtocolError { error: e.to_string(), })?, @@ -614,7 +613,7 @@ impl FromProof for IdentityContractNo verify_tenderdash_proof(proof, mtd, &root_hash, provider)?; Ok(( - maybe_identity.map(types::IdentityContractNonceFetcher), + maybe_identity.map(IdentityContractNonceFetcher), mtd.clone(), proof.clone(), )) @@ -1014,7 +1013,7 @@ impl FromProof for StateTransitionPro epoch: (mtd.epoch as u16).try_into()?, }; - let contracts_provider_fn = provider.as_contract_lookup_fn(); + let contracts_provider_fn = provider.as_contract_lookup_fn(platform_version); let (root_hash, result) = Drive::verify_state_transition_was_executed_with_proof( &state_transition, @@ -1253,11 +1252,11 @@ impl FromProof for MasternodeProtoco .into_iter() .map(|(key, value)| { ProTxHash::from_slice(&key) - .map(|protxhash| { + .map(|pro_tx_hash| { ( - protxhash, + pro_tx_hash, Some(MasternodeProtocolVote { - pro_tx_hash: protxhash, + pro_tx_hash, voted_version: value, }), ) @@ -1310,7 +1309,6 @@ impl FromProof for Elements { } } -// #[cfg_attr(feature = "mocks", mockall::automock)] 
impl<'dq, Q> FromProof for Documents where Q: TryInto> + Clone + 'dq, @@ -1476,8 +1474,9 @@ impl FromProof for ContestedResources { // Decode request to get drive query let drive_query = VotePollsByDocumentTypeQuery::try_from_request(request)?; - let resolved_request = - drive_query.resolve_with_known_contracts_provider(&provider.as_contract_lookup_fn())?; + let resolved_request = drive_query.resolve_with_known_contracts_provider( + &provider.as_contract_lookup_fn(platform_version), + )?; // Parse response to read proof and metadata let proof = response.proof().or(Err(Error::NoProofInResult))?; @@ -1525,7 +1524,7 @@ impl FromProof for Contenders { let drive_query = ContestedDocumentVotePollDriveQuery::try_from_request(request)?; // Resolve request to get verify_*_proof - let contracts_provider = provider.as_contract_lookup_fn(); + let contracts_provider = provider.as_contract_lookup_fn(platform_version); let resolved_request = drive_query.resolve_with_known_contracts_provider(&contracts_provider)?; @@ -1584,7 +1583,7 @@ impl FromProof for Voters { let drive_query = ContestedDocumentVotePollVotesDriveQuery::try_from_request(request)?; // Parse request to get resolved contract that implements verify_*_proof - let contracts_provider = provider.as_contract_lookup_fn(); + let contracts_provider = provider.as_contract_lookup_fn(platform_version); let resolved_request = drive_query.resolve_with_known_contracts_provider(&contracts_provider)?; @@ -1640,7 +1639,7 @@ impl FromProof for ResourceV let proof = response.proof().or(Err(Error::NoProofInResult))?; let mtd = response.metadata().or(Err(Error::EmptyResponseMetadata))?; - let contract_provider_fn = provider.as_contract_lookup_fn(); + let contract_provider_fn = provider.as_contract_lookup_fn(platform_version); let (root_hash, voters) = drive_query .verify_identity_votes_given_proof::>( &proof.grovedb_proof, @@ -2005,7 +2004,7 @@ fn u32_to_u16_opt(i: u32) -> Result, Error> { let i: Option = if i != 0 { let i: u16 = i 
.try_into() - .map_err(|e: std::num::TryFromIntError| Error::RequestError { + .map_err(|e: TryFromIntError| Error::RequestError { error: format!("value {} out of range: {}", i, e), })?; Some(i) @@ -2084,7 +2083,7 @@ impl Length for IndexMap> { /// # Arguments /// /// * `$object`: The type for which to implement Length trait -/// * `$len`: A closure that returns the length of the object; if ommitted, defaults to 1 +/// * `$len`: A closure that returns the length of the object; if omitted, defaults to 1 macro_rules! define_length { ($object:ty,$some:expr,$counter:expr) => { impl Length for $object { diff --git a/packages/rs-drive-proof-verifier/src/provider.rs b/packages/rs-drive-proof-verifier/src/provider.rs index e7eafd2e45f..2761dd5ac4c 100644 --- a/packages/rs-drive-proof-verifier/src/provider.rs +++ b/packages/rs-drive-proof-verifier/src/provider.rs @@ -1,5 +1,7 @@ use crate::error::ContextProviderError; +use dpp::data_contract::serialized_version::DataContractInSerializationFormat; use dpp::prelude::{CoreBlockHeight, DataContract, Identifier}; +use dpp::version::PlatformVersion; use drive::{error::proof::ProofError, query::ContractLookupFn}; #[cfg(feature = "mocks")] use hex::ToHex; @@ -43,6 +45,7 @@ pub trait ContextProvider: Send + Sync { /// # Arguments /// /// * `data_contract_id`: The ID of the data contract to fetch. + /// * `platform_version`: The platform version to use. /// /// # Returns /// @@ -52,6 +55,7 @@ pub trait ContextProvider: Send + Sync { fn get_data_contract( &self, id: &Identifier, + platform_version: &PlatformVersion, ) -> Result>, ContextProviderError>; /// Gets the platform activation height from core. Once this has happened this can be hardcoded. 
@@ -77,8 +81,9 @@ impl + Send + Sync> ContextProvider for C { fn get_data_contract( &self, id: &Identifier, + platform_version: &PlatformVersion, ) -> Result>, ContextProviderError> { - self.as_ref().get_data_contract(id) + self.as_ref().get_data_contract(id, platform_version) } fn get_platform_activation_height(&self) -> Result { @@ -93,9 +98,10 @@ where fn get_data_contract( &self, id: &Identifier, + platform_version: &PlatformVersion, ) -> Result>, ContextProviderError> { let lock = self.lock().expect("lock poisoned"); - lock.get_data_contract(id) + lock.get_data_contract(id, platform_version) } fn get_quorum_public_key( &self, @@ -119,13 +125,19 @@ where /// It is used internally by the Drive proof verification functions to look up data contracts. pub trait DataContractProvider: Send + Sync { /// Returns [ContractLookupFn] function that can be used to look up a [DataContract] by its [Identifier]. - fn as_contract_lookup_fn(&self) -> Box; + fn as_contract_lookup_fn<'a>( + &'a self, + platform_version: &'a PlatformVersion, + ) -> Box>; } impl DataContractProvider for C { /// Returns function that uses [ContextProvider] to provide a [DataContract] to Drive proof verification functions - fn as_contract_lookup_fn(&self) -> Box { + fn as_contract_lookup_fn<'a>( + &'a self, + platform_version: &'a PlatformVersion, + ) -> Box> { let f = |id: &Identifier| -> Result>, drive::error::Error> { - self.get_data_contract(id).map_err(|e| { + self.get_data_contract(id, platform_version).map_err(|e| { drive::error::Error::Proof(ProofError::ErrorRetrievingContract(e.to_string())) }) }; @@ -216,6 +228,7 @@ impl ContextProvider for MockContextProvider { fn get_data_contract( &self, data_contract_id: &Identifier, + platform_version: &PlatformVersion, ) -> Result>, ContextProviderError> { let path = match &self.quorum_keys_dir { Some(p) => p, @@ -239,7 +252,25 @@ impl ContextProvider for MockContextProvider { } }; - let dc: DataContract = serde_json::from_reader(f).expect("cannot 
parse data contract"); + let serialized_form: DataContractInSerializationFormat = serde_json::from_reader(f) + .map_err(|e| { + ContextProviderError::DataContractFailure(format!( + "cannot deserialized data contract with id {}: {}", + data_contract_id, e + )) + })?; + let dc = DataContract::try_from_platform_versioned( + serialized_form, + false, + &mut vec![], + platform_version, + ) + .map_err(|e| { + ContextProviderError::DataContractFailure(format!( + "cannot use serialized version of data contract with id {}: {}", + data_contract_id, e + )) + })?; Ok(Some(Arc::new(dc))) } diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/add_epoch_final_info_operation/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/add_epoch_final_info_operation/mod.rs new file mode 100644 index 00000000000..31dece01853 --- /dev/null +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/add_epoch_final_info_operation/mod.rs @@ -0,0 +1,69 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use grovedb::batch::QualifiedGroveDbOp; + +use dpp::block::epoch::Epoch; +use dpp::block::finalized_epoch_info::FinalizedEpochInfo; + +use dpp::version::PlatformVersion; + +impl Drive { + /// Adds a finalized epoch information operation to the batch. + /// + /// This method creates a `LowLevelDriveOperation` that records the finalized + /// details of an epoch, such as processing fees, storage fees, block proposers, + /// and protocol version. The operation is added to the batch for execution. + /// + /// The method dispatches to the appropriate versioned implementation based on + /// the provided `platform_version`. + /// + /// # Parameters + /// + /// - `epoch`: A reference to the `Epoch` being finalized. + /// - `finalized_epoch_info`: The finalized information for the epoch, including + /// fees, block statistics, and proposer data. 
+ /// - `platform_version`: The platform version, which determines the correct + /// implementation of this method. + /// + /// # Returns + /// + /// - `Ok(LowLevelDriveOperation)`: A low-level drive operation to be executed. + /// - `Err(Error)`: If the platform version is unknown or if an internal error occurs. + /// + /// # Errors + /// + /// - Returns `Error::Drive(DriveError::UnknownVersionMismatch)` if an unsupported + /// platform version is encountered. + /// + /// # Versioning + /// + /// This method supports multiple versions, allowing changes to the internal logic + /// while maintaining backward compatibility. The method currently supports: + /// + /// - **Version 0:** Calls `add_epoch_final_info_operation_v0`. + /// + pub fn add_epoch_final_info_operation( + &self, + epoch: &Epoch, + finalized_epoch_info: FinalizedEpochInfo, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .credit_pools + .epochs + .add_epoch_final_info_operation + { + 0 => self.add_epoch_final_info_operation_v0(epoch, finalized_epoch_info), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "add_epoch_final_info_operation".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/add_epoch_final_info_operation/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/add_epoch_final_info_operation/v0/mod.rs new file mode 100644 index 00000000000..212ca01d11e --- /dev/null +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/add_epoch_final_info_operation/v0/mod.rs @@ -0,0 +1,30 @@ +use grovedb::batch::QualifiedGroveDbOp; +use grovedb::Element; + +use crate::drive::Drive; +use crate::error::Error; + +use crate::drive::credit_pools::epochs::epoch_key_constants; +use crate::drive::credit_pools::epochs::paths::EpochProposers; +use 
dpp::block::epoch::Epoch; +use dpp::block::finalized_epoch_info::FinalizedEpochInfo; +use dpp::serialization::PlatformSerializable; + +impl Drive { + /// Serializes and stores the epoch final info + pub(super) fn add_epoch_final_info_operation_v0( + &self, + epoch: &Epoch, + finalized_epoch_info: FinalizedEpochInfo, + ) -> Result { + let epoch_tree_path = epoch.get_path_vec(); + + let serialized = finalized_epoch_info.serialize_consume_to_bytes()?; + + Ok(QualifiedGroveDbOp::insert_or_replace_op( + epoch_tree_path, + epoch_key_constants::KEY_FINISHED_EPOCH_INFO.to_vec(), + Element::new_item(serialized), + )) + } +} diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/mod.rs index 847c7036291..21a2fda99fc 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/mod.rs @@ -7,7 +7,7 @@ use crate::error::drive::DriveError; use crate::error::Error; use dpp::block::epoch::Epoch; - +use dpp::prelude::FeeMultiplier; use dpp::version::PlatformVersion; impl Drive { @@ -28,7 +28,7 @@ impl Drive { epoch_tree: &Epoch, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result { match platform_version .drive .methods diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/v0/mod.rs index 46984456fa6..dbbb40158a1 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/v0/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_fee_multiplier/v0/mod.rs @@ -7,6 
+7,7 @@ use crate::error::Error; use crate::drive::credit_pools::epochs::epoch_key_constants; use crate::drive::credit_pools::epochs::paths::EpochProposers; use dpp::block::epoch::Epoch; +use dpp::prelude::FeeMultiplier; use platform_version::version::PlatformVersion; impl Drive { @@ -16,7 +17,7 @@ impl Drive { epoch_tree: &Epoch, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result { let element = self .grove .get( diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/mod.rs index 6bc2fc72018..94d09298fb8 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/mod.rs @@ -6,7 +6,7 @@ use crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; use dpp::block::epoch::Epoch; -use dpp::fee::Credits; +use dpp::block::pool_credits::StorageAndProcessingPoolCredits; use dpp::version::PlatformVersion; @@ -21,14 +21,14 @@ impl Drive { /// /// # Returns /// - /// A Result containing either the total credits for the epoch, if found, - /// or an Error if something goes wrong. + /// A Result containing either the total credits for the epoch as a StorageAndProcessingPoolCredits + /// if found, or an Error if something goes wrong. 
pub fn get_epoch_total_credits_for_distribution( &self, epoch_tree: &Epoch, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result { match platform_version .drive .methods diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/v0/mod.rs index ea9fce1ef1e..36c6fd1974a 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/v0/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/get_epoch_total_credits_for_distribution/v0/mod.rs @@ -5,7 +5,7 @@ use crate::fees::get_overflow_error; use crate::error::Error; use dpp::block::epoch::Epoch; -use dpp::fee::Credits; +use dpp::block::pool_credits::StorageAndProcessingPoolCredits; use dpp::version::PlatformVersion; @@ -16,7 +16,7 @@ impl Drive { epoch_tree: &Epoch, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result { let storage_pool_credits = self.get_epoch_storage_credits_for_distribution( epoch_tree, transaction, @@ -29,9 +29,15 @@ impl Drive { platform_version, )?; - storage_pool_credits - .checked_add(processing_pool_credits) - .ok_or_else(|| get_overflow_error("overflow getting total credits for distribution")) + Ok(StorageAndProcessingPoolCredits { + storage_pool_credits, + processing_pool_credits, + total_credits: storage_pool_credits + .checked_add(processing_pool_credits) + .ok_or_else(|| { + get_overflow_error("overflow getting total credits for distribution") + })?, + }) } } @@ -77,7 +83,8 @@ mod tests { let retrieved_combined_fee = drive .get_epoch_total_credits_for_distribution(&epoch, Some(&transaction), platform_version) - .expect("should get combined fee"); + .expect("should get combined fee") + .total_credits; 
assert_eq!(retrieved_combined_fee, processing_fee + storage_fee); } diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/mod.rs index 2096f129060..df97bfa8159 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/credit_distribution_pools/mod.rs @@ -3,6 +3,7 @@ //! This module implements functions in Drive to distribute fees for a given Epoch. //! +mod add_epoch_final_info_operation; mod add_epoch_processing_credits_for_distribution_operation; mod get_epoch_fee_multiplier; mod get_epoch_processing_credits_for_distribution; diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/epoch_key_constants.rs b/packages/rs-drive/src/drive/credit_pools/epochs/epoch_key_constants.rs index d3c987a75c4..c9d06599e8c 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/epoch_key_constants.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/epoch_key_constants.rs @@ -1,39 +1,44 @@ /// Processing fee pool key -pub const KEY_POOL_PROCESSING_FEES: &[u8; 1] = b"p"; +pub const KEY_POOL_PROCESSING_FEES: &[u8; 1] = b"p"; // 112 /// Processing fee pool key as u8 pub const KEY_POOL_PROCESSING_FEES_U8: u8 = b'p'; /// Storage fee pool key -pub const KEY_POOL_STORAGE_FEES: &[u8; 1] = b"s"; +pub const KEY_POOL_STORAGE_FEES: &[u8; 1] = b"s"; // 115 /// Storage fee pool key as u8 pub const KEY_POOL_STORAGE_FEES_U8: u8 = b's'; /// Start time key -pub const KEY_START_TIME: &[u8; 1] = b"t"; +pub const KEY_START_TIME: &[u8; 1] = b"t"; // 116 /// Start time key as u8 pub const KEY_START_TIME_U8: u8 = b't'; /// Epoch's protocol version key -pub const KEY_PROTOCOL_VERSION: &[u8; 1] = b"v"; +pub const KEY_PROTOCOL_VERSION: &[u8; 1] = b"v"; // 118 /// Epoch's protocol version key as u8 pub const KEY_PROTOCOL_VERSION_U8: u8 = b'v'; /// Start block height key -pub const 
KEY_START_BLOCK_HEIGHT: &[u8; 1] = b"h"; +pub const KEY_START_BLOCK_HEIGHT: &[u8; 1] = b"h"; // 104 /// Start block height key as u8 pub const KEY_START_BLOCK_HEIGHT_U8: u8 = b'h'; /// Start block core chain locked height key -pub const KEY_START_BLOCK_CORE_HEIGHT: &[u8; 1] = b"c"; +pub const KEY_START_BLOCK_CORE_HEIGHT: &[u8; 1] = b"c"; // 99 /// Start block core chain locked height key as u8 pub const KEY_START_BLOCK_CORE_HEIGHT_U8: u8 = b'c'; /// Proposers key -pub const KEY_PROPOSERS: &[u8; 1] = b"m"; +pub const KEY_PROPOSERS: &[u8; 1] = b"m"; // 109 /// Proposers key as u8 pub const KEY_PROPOSERS_U8: u8 = b'm'; /// Fee multiplier key -pub const KEY_FEE_MULTIPLIER: &[u8; 1] = b"x"; +pub const KEY_FEE_MULTIPLIER: &[u8; 1] = b"x"; // 120 /// Fee multiplier key as u8 pub const KEY_FEE_MULTIPLIER_U8: u8 = b'x'; + +/// Gives information about a finished epoch +pub const KEY_FINISHED_EPOCH_INFO: &[u8; 1] = b"f"; // 102 +/// Gives information about a finished epoch +pub const KEY_FINISHED_EPOCH_INFO_U8: u8 = b'f'; diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/get_epoch_protocol_version/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/get_epoch_protocol_version/mod.rs new file mode 100644 index 00000000000..9bbc1e8b987 --- /dev/null +++ b/packages/rs-drive/src/drive/credit_pools/epochs/get_epoch_protocol_version/mod.rs @@ -0,0 +1,47 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; + +use dpp::block::epoch::Epoch; + +use grovedb::TransactionArg; + +use dpp::version::PlatformVersion; +use platform_version::version::ProtocolVersion; + +impl Drive { + /// Returns the protocol version for the epoch + /// + /// # Arguments + /// + /// * `epoch_tree` - An Epoch instance. + /// * `transaction` - A TransactionArg instance. + /// * `platform_version` - A PlatformVersion instance representing the version of Platform. 
+ /// + /// # Returns + /// + /// A Result containing the start block height or an Error. + pub fn get_epoch_protocol_version( + &self, + epoch_tree: &Epoch, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .credit_pools + .epochs + .get_epoch_protocol_version + { + 0 => self.get_epoch_protocol_version_v0(epoch_tree, transaction, platform_version), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "get_epoch_protocol_version".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/get_epoch_protocol_version/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/get_epoch_protocol_version/v0/mod.rs new file mode 100644 index 00000000000..ef7d9fb7a7f --- /dev/null +++ b/packages/rs-drive/src/drive/credit_pools/epochs/get_epoch_protocol_version/v0/mod.rs @@ -0,0 +1,50 @@ +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; + +use dpp::block::epoch::Epoch; + +use crate::drive::credit_pools::epochs::epoch_key_constants::KEY_PROTOCOL_VERSION; +use crate::drive::credit_pools::epochs::paths::EpochProposers; +use grovedb::{Element, TransactionArg}; +use platform_version::version::{PlatformVersion, ProtocolVersion}; + +impl Drive { + /// Returns the block height of the Epoch's start block + pub(super) fn get_epoch_protocol_version_v0( + &self, + epoch_tree: &Epoch, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let element = self + .grove + .get( + &epoch_tree.get_path(), + KEY_PROTOCOL_VERSION.as_slice(), + transaction, + &platform_version.drive.grove_version, + ) + .unwrap() + .map_err(Error::GroveDB)?; + + let Element::Item(encoded_protocol_version, _) = element else { + return Err(Error::Drive(DriveError::UnexpectedElementType( + "protocol version must be an item", + ))); + }; + + let 
protocol_version = ProtocolVersion::from_be_bytes( + encoded_protocol_version + .as_slice() + .try_into() + .map_err(|_| { + Error::Drive(DriveError::CorruptedSerialization(String::from( + "protocol version must be u32", + ))) + })?, + ); + + Ok(protocol_version) + } +} diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/get_epochs_protocol_versions/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/get_epochs_protocol_versions/v0/mod.rs index 4284f8956b2..057d531565d 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/get_epochs_protocol_versions/v0/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/get_epochs_protocol_versions/v0/mod.rs @@ -77,19 +77,18 @@ impl Drive { ))); }; - let epoch_index_bytes: [u8; 32] = + let epoch_index_bytes: [u8; 2] = epoch_index_vec.as_slice().try_into().map_err(|_| { Error::Drive(DriveError::CorruptedSerialization( "extended epoch info: item has an invalid length".to_string(), )) })?; - let epoch_index = - EpochIndex::from_be_bytes([epoch_index_bytes[0], epoch_index_bytes[1]]) - .checked_sub(EPOCH_KEY_OFFSET) - .ok_or(Error::Drive(DriveError::CorruptedSerialization( - "epoch bytes on disk too small, should be over epoch key offset" - .to_string(), - )))?; + let epoch_index = EpochIndex::from_be_bytes(epoch_index_bytes) + .checked_sub(EPOCH_KEY_OFFSET) + .ok_or(Error::Drive(DriveError::CorruptedSerialization( + "epoch bytes on disk too small, should be over epoch key offset" + .to_string(), + )))?; let Element::Item(encoded_protocol_version, _) = protocol_version_element else { return Err(Error::Drive(DriveError::UnexpectedElementType( diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/get_finalized_epoch_info/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/get_finalized_epoch_info/mod.rs new file mode 100644 index 00000000000..9e65128888f --- /dev/null +++ b/packages/rs-drive/src/drive/credit_pools/epochs/get_finalized_epoch_info/mod.rs @@ -0,0 +1,77 @@ +mod v0; + +use 
crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; + +use dpp::block::epoch::EpochIndex; +use dpp::block::finalized_epoch_info::FinalizedEpochInfo; +use dpp::version::PlatformVersion; +use grovedb::TransactionArg; + +impl Drive { + /// Retrieves finalized epoch information for a specified range of epochs. + /// + /// This method fetches finalized epoch details between `start_epoch_index` (inclusive or exclusive) + /// and `end_epoch_index` (inclusive or exclusive) and returns them as a collection. + /// + /// The method first determines the correct versioned implementation to invoke, based on the + /// provided `platform_version`. Currently, only version `0` is supported. + /// + /// ## Parameters + /// + /// - `start_epoch_index` (`u16`): + /// The starting epoch index for the query. + /// - `start_epoch_index_included` (`bool`): + /// If `true`, includes `start_epoch_index` in the results. + /// - `end_epoch_index` (`u16`): + /// The ending epoch index for the query. + /// - `end_epoch_index_included` (`bool`): + /// If `true`, includes `end_epoch_index` in the results. + /// - `transaction` (`TransactionArg`): + /// The current database transaction for querying storage. + /// - `platform_version` (`&PlatformVersion`): + /// The platform version to use for method dispatch. + /// + /// ## Returns + /// + /// - `Ok(T)`: A collection (`T`) of `(EpochIndex, FinalizedEpochInfo)` tuples, + /// where `T` implements `FromIterator<(EpochIndex, FinalizedEpochInfo)>`. + /// - `Err(Error)`: An error if querying fails due to version mismatch or storage issues. + /// + /// ## Errors + /// + /// - Returns `DriveError::UnknownVersionMismatch` if an unsupported `platform_version` is provided. + /// - Any errors returned by `get_finalized_epoch_infos_v0` if the query fails. 
+ pub fn get_finalized_epoch_infos>( + &self, + start_epoch_index: u16, + start_epoch_index_included: bool, + end_epoch_index: u16, + end_epoch_index_included: bool, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .credit_pools + .epochs + .get_finalized_epoch_infos + { + 0 => self.get_finalized_epoch_infos_v0( + start_epoch_index, + start_epoch_index_included, + end_epoch_index, + end_epoch_index_included, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "get_finalized_epoch_infos".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/get_finalized_epoch_info/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/get_finalized_epoch_info/v0/mod.rs new file mode 100644 index 00000000000..b93f08298f7 --- /dev/null +++ b/packages/rs-drive/src/drive/credit_pools/epochs/get_finalized_epoch_info/v0/mod.rs @@ -0,0 +1,173 @@ +use crate::drive::credit_pools::epochs::epoch_key_constants::KEY_FINISHED_EPOCH_INFO; +use crate::drive::credit_pools::pools_vec_path; +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::query::QueryItem; +use dpp::block::epoch::{EpochIndex, EPOCH_KEY_OFFSET}; +use dpp::block::finalized_epoch_info::FinalizedEpochInfo; +use dpp::serialization::PlatformDeserializable; +use dpp::version::PlatformVersion; +use dpp::ProtocolError; +use grovedb::query_result_type::QueryResultType; +use grovedb::{PathQuery, Query, SizedQuery, TransactionArg}; + +impl Drive { + /// Retrieves finalized epoch information for a given range of epochs. + /// + /// This method constructs a query over the stored finalized epoch information based on + /// the epoch indices provided. 
The query range is determined using: + /// + /// - `start_epoch_index` and `end_epoch_index`: the epoch indices (of type `u16`). + /// - `start_epoch_index_included` and `end_epoch_index_included`: booleans specifying + /// whether the start and end boundaries are included in the query. + /// + /// Before constructing the query, an internal offset (`EPOCH_KEY_OFFSET`) is added to + /// the epoch indices, and the resulting values are converted to big‑endian byte arrays. + /// + /// The query is then built using one of several `QueryItem` variants, depending on the + /// following cases: + /// + /// 1. **Single Key Query:** If `start_epoch_index == end_epoch_index` and both boundaries + /// are included, the query returns exactly that key (using `QueryItem::Key`). + /// If either boundary is excluded, the result is empty. + /// + /// 2. **Ascending Range Query:** If `start_epoch_index < end_epoch_index`, the query is + /// constructed in ascending order: + /// - **Both boundaries included:** `QueryItem::RangeInclusive(start_key, end_key)`. + /// - **Start included, end excluded:** `QueryItem::Range(start_key, end_key)`. + /// - **Start excluded, end included:** `QueryItem::RangeAfterToInclusive(start_key, end_key)`. + /// - **Both boundaries excluded:** `QueryItem::RangeAfterTo(start_key, end_key)`. + /// + /// 3. **Descending Range Query:** If `start_epoch_index > end_epoch_index`, the roles + /// of the keys are reversed and similar range variants are used, with the query’s + /// `left_to_right` flag set to `false`. + /// + /// Finally, the query is executed and the results are parsed into a vector of + /// `FinalizedEpochInfo`. + /// + /// # Parameters + /// + /// - `start_epoch_index` (`u16`): The starting epoch index for the query. + /// - `start_epoch_index_included` (`bool`): If `true`, the epoch at `start_epoch_index` is included. + /// - `end_epoch_index` (`u16`): The ending epoch index for the query. 
+ /// - `end_epoch_index_included` (`bool`): If `true`, the epoch at `end_epoch_index` is included. + /// - `transaction` (`TransactionArg`): The current GroveDB transaction. + /// - `platform_version` (`&PlatformVersion`): The platform version to use for method dispatch. + /// + /// # Returns + /// + /// A `Result` containing a vector of `FinalizedEpochInfo` on success or an `Error` on failure. + /// + /// # Errors + /// + /// - Returns a `ProtocolError::Overflow` if an epoch index plus the offset overflows. + /// - Returns errors from the underlying storage query if the query fails. + /// - Returns an empty vector if the range is empty due to exclusion of boundaries. + /// + pub(super) fn get_finalized_epoch_infos_v0< + T: FromIterator<(EpochIndex, FinalizedEpochInfo)>, + >( + &self, + start_epoch_index: u16, + start_epoch_index_included: bool, + end_epoch_index: u16, + end_epoch_index_included: bool, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + // Compute the start and end keys with the offset. + let start_index = start_epoch_index + .checked_add(EPOCH_KEY_OFFSET) + .ok_or(ProtocolError::Overflow("Stored epoch index too high"))?; + let end_index = end_epoch_index + .checked_add(EPOCH_KEY_OFFSET) + .ok_or(ProtocolError::Overflow("Stored epoch index too high"))?; + + let start_key = start_index.to_be_bytes().to_vec(); + let end_key = end_index.to_be_bytes().to_vec(); + + // Determine if the query should be ascending. + let ascending = start_epoch_index <= end_epoch_index; + + // Build the query item based on the range and inclusivity parameters. + let query_item = if start_epoch_index == end_epoch_index { + // If the start and end are equal, only return a result if both boundaries are included. + if start_epoch_index_included && end_epoch_index_included { + QueryItem::Key(start_key) + } else { + // No epochs satisfy the range. 
+ return Ok(T::from_iter(std::iter::empty())); + } + } else if ascending { + // Ascending order: start_epoch_index < end_epoch_index. + if start_epoch_index_included && end_epoch_index_included { + QueryItem::RangeInclusive(start_key..=end_key) + } else if start_epoch_index_included && !end_epoch_index_included { + QueryItem::Range(start_key..end_key) + } else if !start_epoch_index_included && end_epoch_index_included { + QueryItem::RangeAfterToInclusive(start_key..=end_key) + } else { + QueryItem::RangeAfterTo(start_key..end_key) + } + } else { + // Descending order: start_epoch_index > end_epoch_index. + if start_epoch_index_included && end_epoch_index_included { + QueryItem::RangeInclusive(end_key..=start_key) + } else if start_epoch_index_included && !end_epoch_index_included { + QueryItem::Range(end_key..start_key) + } else if !start_epoch_index_included && end_epoch_index_included { + QueryItem::RangeAfterToInclusive(end_key..=start_key) + } else { + QueryItem::RangeAfterTo(end_key..start_key) + } + }; + + // Construct the query. + let mut query = Query::new_single_query_item(query_item); + query.left_to_right = ascending; + query.set_subquery_key(KEY_FINISHED_EPOCH_INFO.to_vec()); + let path_query = PathQuery::new(pools_vec_path(), SizedQuery::new(query, None, None)); + + let results = self + .grove_get_path_query( + &path_query, + transaction, + QueryResultType::QueryPathKeyElementTrioResultType, + &mut vec![], + &platform_version.drive, + )? 
+ .0; + + results + .to_path_key_elements() + .into_iter() + .map(|(mut path, _, element)| { + let epoch_index_vec = + path.pop() + .ok_or(Error::Drive(DriveError::CorruptedDriveState( + "the path must have a last element".to_string(), + )))?; + + let epoch_index_bytes: [u8; 2] = + epoch_index_vec.as_slice().try_into().map_err(|_| { + Error::Drive(DriveError::CorruptedSerialization( + "extended epoch info: item has an invalid length".to_string(), + )) + })?; + let epoch_index = EpochIndex::from_be_bytes(epoch_index_bytes) + .checked_sub(EPOCH_KEY_OFFSET) + .ok_or(Error::Drive(DriveError::CorruptedSerialization( + "epoch bytes on disk too small, should be over epoch key offset" + .to_string(), + )))?; + + let item_bytes = element.as_item_bytes()?; + + let epoch_info = FinalizedEpochInfo::deserialize_from_bytes(item_bytes)?; + + Ok((epoch_index, epoch_info)) + }) + .collect::>() + } +} diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/mod.rs index 4d84a335e3f..40a85b3de1f 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/mod.rs @@ -28,5 +28,10 @@ pub mod start_block; #[cfg(feature = "server")] pub mod start_time; +#[cfg(feature = "server")] +mod get_epoch_protocol_version; #[cfg(feature = "server")] mod has_epoch_tree_exists; + +#[cfg(feature = "server")] +mod get_finalized_epoch_info; diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/operations_factory.rs b/packages/rs-drive/src/drive/credit_pools/epochs/operations_factory.rs index b6ea2661eae..f4c4b9faee4 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/operations_factory.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/operations_factory.rs @@ -17,6 +17,7 @@ use crate::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; use dpp::balances::credits::Creditable; use dpp::block::epoch::Epoch; use dpp::fee::Credits; +use dpp::prelude::Identifier; 
use dpp::util::deserializer::ProtocolVersion; use dpp::version::PlatformVersion; use grovedb::batch::QualifiedGroveDbOp; @@ -93,7 +94,7 @@ pub trait EpochOperations { /// Adds a groveDB op to the batch which deletes the given epoch proposers from the proposers tree. fn add_delete_proposers_operations( &self, - pro_tx_hashes: Vec>, + pro_tx_hashes: Vec, batch: &mut GroveDbOpBatch, ); } @@ -298,11 +299,11 @@ impl EpochOperations for Epoch { /// Adds a groveDB op to the batch which deletes the given epoch proposers from the proposers tree. fn add_delete_proposers_operations( &self, - pro_tx_hashes: Vec>, + pro_tx_hashes: Vec, batch: &mut GroveDbOpBatch, ) { for pro_tx_hash in pro_tx_hashes.into_iter() { - batch.add_delete(self.get_proposers_path_vec(), pro_tx_hash); + batch.add_delete(self.get_proposers_path_vec(), pro_tx_hash.to_vec()); } } } @@ -523,7 +524,7 @@ mod tests { .get_epoch_start_time(&epoch, Some(&transaction), platform_version) .expect("should get start time"); - assert_eq!(stored_start_time, start_time); + assert_eq!(stored_start_time, Some(start_time)); let stored_block_height = drive .get_epoch_start_block_height(&epoch, Some(&transaction), platform_version) @@ -691,7 +692,7 @@ mod tests { .get_epoch_start_time(&epoch_tree, Some(&transaction), platform_version) .expect("should get start time"); - assert_eq!(start_time_ms, actual_start_time_ms); + assert_eq!(Some(start_time_ms), actual_start_time_ms); } #[test] @@ -904,6 +905,7 @@ mod tests { mod delete_proposers { use super::*; use crate::query::proposer_block_count_query::ProposerQueryType; + use dpp::prelude::Identifier; #[test] fn test_values_are_being_deleted() { @@ -947,8 +949,8 @@ mod tests { let mut awaited_result = pro_tx_hashes .iter() - .map(|hash| (hash.to_vec(), 1)) - .collect::, u64)>>(); + .map(|hash| ((*hash).into(), 1)) + .collect::>(); // sort both result to be able to compare them stored_proposers.sort(); diff --git 
a/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/mod.rs index fd75a4c17cc..c972ce86845 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/mod.rs @@ -6,9 +6,9 @@ use crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; -use dpp::block::epoch::Epoch; - use crate::query::proposer_block_count_query::ProposerQueryType; +use dpp::block::epoch::Epoch; +use dpp::identifier::Identifier; use dpp::version::PlatformVersion; impl Drive { @@ -29,8 +29,8 @@ impl Drive { /// # Returns /// /// A `Result` containing: - /// - `Vec<(Vec, u64)>`: A vector of tuples where each tuple contains: - /// - A byte vector (`Vec`) representing the proposer's transaction hash. + /// - `Vec<(Identifier, u64)>`: A vector of tuples where each tuple contains: + /// - An identifier representing an Evonode's pro_tx_hash identifier. /// - A `u64` representing the number of blocks proposed by that proposer. /// - `Error`: An error if the query fails due to an invalid platform version, transaction issues, or invalid epoch data. 
/// @@ -46,7 +46,7 @@ impl Drive { query_type: ProposerQueryType, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result, u64)>, Error> { + ) -> Result, Error> { match platform_version .drive .methods diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/v0/mod.rs index 992cb31dd98..db8cec899f8 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/v0/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/proposers/fetch_epoch_proposers/v0/mod.rs @@ -6,6 +6,7 @@ use crate::error::drive::DriveError; use crate::error::Error; use crate::query::proposer_block_count_query::ProposerQueryType; use dpp::block::epoch::Epoch; +use dpp::prelude::Identifier; use platform_version::version::PlatformVersion; impl Drive { @@ -26,7 +27,7 @@ impl Drive { /// # Returns /// /// A `Result` containing: - /// - `Vec<(Vec, u64)>`: A vector of tuples where each tuple contains: + /// - `Vec<(Identifier, u64)>`: A vector of tuples where each tuple contains: /// - A byte vector (`Vec`) representing the proposer's transaction hash. /// - A `u64` representing the number of blocks proposed by that proposer. /// - `Error`: An error if the query fails due to an invalid platform version, transaction issues, or invalid epoch data. 
@@ -43,7 +44,7 @@ impl Drive { query_type: ProposerQueryType, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result, u64)>, Error> { + ) -> Result, Error> { let use_optional = query_type.allows_optional(); let path_query = query_type.into_path_query(epoch_tree); @@ -76,7 +77,13 @@ impl Drive { } }; - Ok((pro_tx_hash, block_count)) + let identifier = pro_tx_hash.try_into().map_err(|_| { + Error::Drive(DriveError::CorruptedDriveState( + "pro_tx_hash should be 32 bytes".to_string(), + )) + })?; + + Ok((identifier, block_count)) }) .collect::>() } else { @@ -113,7 +120,13 @@ impl Drive { })?, ); - Ok((pro_tx_hash, block_count)) + let identifier = pro_tx_hash.try_into().map_err(|_| { + Error::Drive(DriveError::CorruptedDriveState( + "pro_tx_hash should be 32 bytes".to_string(), + )) + })?; + + Ok((identifier, block_count)) }) .collect::>() }?; @@ -125,12 +138,12 @@ impl Drive { #[cfg(test)] mod tests { use crate::drive::credit_pools::epochs::operations_factory::EpochOperations; + use crate::query::proposer_block_count_query::ProposerQueryType; use crate::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; use crate::util::batch::GroveDbOpBatch; use crate::util::test_helpers::setup::setup_drive_with_initial_state_structure; use dpp::block::epoch::Epoch; - - use crate::query::proposer_block_count_query::ProposerQueryType; + use dpp::identifier::Identifier; use dpp::version::PlatformVersion; #[test] @@ -139,7 +152,7 @@ mod tests { let platform_version = PlatformVersion::latest(); let transaction = drive.grove.start_transaction(); - let pro_tx_hash: [u8; 32] = rand::random(); + let pro_tx_hash = Identifier::random(); let block_count = 42; let epoch = Epoch::new(0).unwrap(); @@ -148,7 +161,8 @@ mod tests { batch.push(epoch.init_proposers_tree_operation()); - batch.push(epoch.update_proposer_block_count_operation(&pro_tx_hash, block_count)); + batch + .push(epoch.update_proposer_block_count_operation(pro_tx_hash.as_bytes(), block_count)); drive 
.grove_apply_batch(batch, false, Some(&transaction), &platform_version.drive) @@ -163,6 +177,6 @@ mod tests { ) .expect("should get proposers"); - assert_eq!(result, vec!((pro_tx_hash.to_vec(), block_count))); + assert_eq!(result, vec!((pro_tx_hash, block_count))); } } diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/mod.rs index 8972a1d5441..88606410001 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/mod.rs @@ -4,9 +4,9 @@ use crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; use dpp::block::epoch::Epoch; -use grovedb::TransactionArg; - +use dpp::prelude::TimestampMillis; use dpp::version::PlatformVersion; +use grovedb::TransactionArg; impl Drive { /// Returns the start time of the given Epoch. @@ -25,7 +25,7 @@ impl Drive { epoch_tree: &Epoch, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result, Error> { match platform_version .drive .methods @@ -41,4 +41,37 @@ impl Drive { })), } } + + /// Returns the start time of the given Epoch. + /// + /// # Arguments + /// + /// * `epoch_tree` - An Epoch instance representing the epoch. + /// * `transaction` - A TransactionArg instance. + /// * `platform_version` - A PlatformVersion instance representing the version of the drive. + /// + /// # Returns + /// + /// A Result containing the epoch start time or an Error. 
+ pub fn get_expected_epoch_start_time( + &self, + epoch_tree: &Epoch, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .credit_pools + .epochs + .get_epoch_start_time + { + 0 => self.get_expected_epoch_start_time_v0(epoch_tree, transaction, platform_version), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "get_epoch_start_time".to_string(), + known_versions: vec![0], + received: version, + })), + } + } } diff --git a/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/v0/mod.rs b/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/v0/mod.rs index 390a878b74e..8874dc7fa18 100644 --- a/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/v0/mod.rs +++ b/packages/rs-drive/src/drive/credit_pools/epochs/start_time/get_epoch_start_time/v0/mod.rs @@ -4,6 +4,7 @@ use crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; use dpp::block::epoch::Epoch; +use dpp::prelude::TimestampMillis; use grovedb::{Element, TransactionArg}; use platform_version::version::PlatformVersion; @@ -14,11 +15,11 @@ impl Drive { epoch_tree: &Epoch, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result { + ) -> Result, Error> { let element = self .grove - .get( - &epoch_tree.get_path(), + .get_raw_optional( + (&epoch_tree.get_path()).into(), KEY_START_TIME.as_slice(), transaction, &platform_version.drive.grove_version, @@ -26,20 +27,41 @@ impl Drive { .unwrap() .map_err(Error::GroveDB)?; + let Some(element) = element else { + return Ok(None); + }; + let Element::Item(encoded_start_time, _) = element else { return Err(Error::Drive(DriveError::UnexpectedElementType( "start time must be an item", ))); }; - let start_time = - u64::from_be_bytes(encoded_start_time.as_slice().try_into().map_err(|_| { + let start_time = TimestampMillis::from_be_bytes( + 
encoded_start_time.as_slice().try_into().map_err(|_| { Error::Drive(DriveError::CorruptedSerialization(String::from( "start time must be u64", ))) - })?); + })?, + ); - Ok(start_time) + Ok(Some(start_time)) + } +} + +impl Drive { + /// Returns the start time of the given Epoch. + pub(super) fn get_expected_epoch_start_time_v0( + &self, + epoch: &Epoch, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + self.get_epoch_start_time_v0(epoch, transaction, platform_version)? + .ok_or(Error::Drive(DriveError::CorruptedDriveState(format!( + "expected start time for epoch {}", + epoch.index + )))) } } @@ -52,6 +74,7 @@ mod tests { mod get_epoch_start_time { use super::*; + use assert_matches::assert_matches; use dpp::version::PlatformVersion; #[test] @@ -63,20 +86,20 @@ mod tests { let non_initiated_epoch_tree = Epoch::new(7000).unwrap(); - let result = drive.get_epoch_start_time( + let result = drive.get_expected_epoch_start_time( &non_initiated_epoch_tree, Some(&transaction), platform_version, ); - assert!(matches!( + assert_matches!( result, - Err(Error::GroveDB(grovedb::Error::PathParentLayerNotFound(_))) - )); + Err(Error::Drive(DriveError::CorruptedDriveState(_))) + ); } #[test] - fn test_error_if_value_is_not_set() { + fn test_none_if_value_is_not_set() { let drive = setup_drive_with_initial_state_structure(None); let transaction = drive.grove.start_transaction(); @@ -87,7 +110,7 @@ mod tests { let result = drive.get_epoch_start_time(&epoch_tree, Some(&transaction), platform_version); - assert!(matches!(result, Err(Error::GroveDB(_)))); + assert_matches!(result, Ok(None)); } #[test] diff --git a/packages/rs-drive/src/drive/group/prove/prove_action_infos/v0/mod.rs b/packages/rs-drive/src/drive/group/prove/prove_action_infos/v0/mod.rs index 1deadafda50..c77716a147b 100644 --- a/packages/rs-drive/src/drive/group/prove/prove_action_infos/v0/mod.rs +++ b/packages/rs-drive/src/drive/group/prove/prove_action_infos/v0/mod.rs @@ -108,7 
+108,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -120,6 +119,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([ ( 0, @@ -330,7 +335,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -342,6 +346,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([ ( 0, diff --git a/packages/rs-drive/src/drive/group/prove/prove_action_signers/v0/mod.rs b/packages/rs-drive/src/drive/group/prove/prove_action_signers/v0/mod.rs index 509b3749319..9b837b8bea9 100644 --- a/packages/rs-drive/src/drive/group/prove/prove_action_signers/v0/mod.rs +++ b/packages/rs-drive/src/drive/group/prove/prove_action_signers/v0/mod.rs @@ -103,7 +103,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -115,6 +114,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([ ( 0, @@ -285,7 +290,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - 
metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -297,6 +301,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([( 0, Group::V0(GroupV0 { @@ -373,7 +383,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -385,6 +394,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([( 0, Group::V0(GroupV0 { diff --git a/packages/rs-drive/src/drive/group/prove/prove_group_info/v0/mod.rs b/packages/rs-drive/src/drive/group/prove/prove_group_info/v0/mod.rs index 189cc63a2a0..64dc4bc471f 100644 --- a/packages/rs-drive/src/drive/group/prove/prove_group_info/v0/mod.rs +++ b/packages/rs-drive/src/drive/group/prove/prove_group_info/v0/mod.rs @@ -81,7 +81,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -93,6 +92,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([( 0, Group::V0(GroupV0 { @@ -151,7 +156,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { 
can_be_deleted: false, readonly: false, @@ -163,6 +167,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: Default::default(), }); diff --git a/packages/rs-drive/src/drive/group/prove/prove_group_infos/v0/mod.rs b/packages/rs-drive/src/drive/group/prove/prove_group_infos/v0/mod.rs index cf65df54006..4ab9965950e 100644 --- a/packages/rs-drive/src/drive/group/prove/prove_group_infos/v0/mod.rs +++ b/packages/rs-drive/src/drive/group/prove/prove_group_infos/v0/mod.rs @@ -96,7 +96,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -108,6 +107,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([ ( 0, @@ -221,7 +226,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -233,6 +237,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: BTreeMap::from([ ( 0, diff --git a/packages/rs-drive/src/drive/initialization/v1/mod.rs b/packages/rs-drive/src/drive/initialization/v1/mod.rs index 9b0b8fad1d8..f7d1ad8aae0 100644 --- a/packages/rs-drive/src/drive/initialization/v1/mod.rs +++ 
b/packages/rs-drive/src/drive/initialization/v1/mod.rs @@ -5,17 +5,20 @@ use crate::util::batch::GroveDbOpBatch; use crate::drive::system::misc_path_vec; use crate::drive::tokens::paths::{ - token_distributions_root_path_vec, token_timed_distributions_path_vec, tokens_root_path_vec, - TOKEN_BALANCES_KEY, TOKEN_BLOCK_TIMED_DISTRIBUTIONS_KEY, TOKEN_DISTRIBUTIONS_KEY, - TOKEN_EPOCH_TIMED_DISTRIBUTIONS_KEY, TOKEN_IDENTITY_INFO_KEY, TOKEN_MS_TIMED_DISTRIBUTIONS_KEY, - TOKEN_PERPETUAL_DISTRIBUTIONS_KEY, TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, - TOKEN_STATUS_INFO_KEY, TOKEN_TIMED_DISTRIBUTIONS_KEY, + token_distributions_root_path_vec, token_perpetual_distributions_path_vec, + token_root_perpetual_distributions_path_vec, token_timed_distributions_path_vec, + tokens_root_path_vec, TOKEN_BALANCES_KEY, TOKEN_BLOCK_TIMED_DISTRIBUTIONS_KEY, + TOKEN_DISTRIBUTIONS_KEY, TOKEN_EPOCH_TIMED_DISTRIBUTIONS_KEY, TOKEN_IDENTITY_INFO_KEY, + TOKEN_MS_TIMED_DISTRIBUTIONS_KEY, TOKEN_PERPETUAL_DISTRIBUTIONS_KEY, + TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, TOKEN_STATUS_INFO_KEY, TOKEN_TIMED_DISTRIBUTIONS_KEY, }; use crate::drive::{Drive, RootTree}; use crate::error::Error; use crate::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; +use crate::util::grove_operations::BatchInsertTreeApplyType; +use crate::util::object_size_info::PathKeyInfo; use dpp::version::PlatformVersion; -use grovedb::{Element, TransactionArg}; +use grovedb::{Element, TransactionArg, TreeType}; use grovedb_path::SubtreePath; impl Drive { diff --git a/packages/rs-drive/src/drive/system/genesis_time/mod.rs b/packages/rs-drive/src/drive/system/genesis_time/mod.rs index 93addd701d3..56c9de510d6 100644 --- a/packages/rs-drive/src/drive/system/genesis_time/mod.rs +++ b/packages/rs-drive/src/drive/system/genesis_time/mod.rs @@ -7,12 +7,16 @@ use crate::drive::Drive; use crate::error::Error; use dpp::block::epoch::Epoch; use dpp::fee::epoch::GENESIS_EPOCH_INDEX; +use dpp::prelude::TimestampMillis; use 
dpp::version::PlatformVersion; use grovedb::TransactionArg; impl Drive { /// Returns the genesis time. Checks cache first, then storage. - pub fn get_genesis_time(&self, transaction: TransactionArg) -> Result, Error> { + pub fn get_genesis_time( + &self, + transaction: TransactionArg, + ) -> Result, Error> { // let's first check the cache let genesis_time_ms = self.cache.genesis_time_ms.read(); @@ -24,21 +28,10 @@ impl Drive { drop(genesis_time_ms); - let epoch = Epoch::new(GENESIS_EPOCH_INDEX).unwrap(); + let epoch = Epoch::new(GENESIS_EPOCH_INDEX) + .expect("expected to be able to create epoch with genesis time"); - match self.get_epoch_start_time(&epoch, transaction, platform_version) { - Ok(genesis_time_ms) => { - let mut genesis_time_ms_cache = self.cache.genesis_time_ms.write(); - - *genesis_time_ms_cache = Some(genesis_time_ms); - - Ok(Some(genesis_time_ms)) - } - Err(Error::GroveDB( - grovedb::Error::PathParentLayerNotFound(_) | grovedb::Error::PathKeyNotFound(_), - )) => Ok(None), - Err(e) => Err(e), - } + self.get_epoch_start_time(&epoch, transaction, platform_version) } /// Sets genesis time diff --git a/packages/rs-drive/src/drive/tokens/balance/prove_identities_token_balances/v0/mod.rs b/packages/rs-drive/src/drive/tokens/balance/prove_identities_token_balances/v0/mod.rs index d25dc8fe165..44c1ba25cc4 100644 --- a/packages/rs-drive/src/drive/tokens/balance/prove_identities_token_balances/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/balance/prove_identities_token_balances/v0/mod.rs @@ -75,7 +75,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -87,6 +86,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: 
None, groups: Default::default(), tokens: BTreeMap::from([( 0, @@ -169,7 +174,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -181,6 +185,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, @@ -255,7 +265,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -267,6 +276,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, diff --git a/packages/rs-drive/src/drive/tokens/balance/prove_identity_token_balances/v0/mod.rs b/packages/rs-drive/src/drive/tokens/balance/prove_identity_token_balances/v0/mod.rs index fad0b5357b0..56031f6d3d9 100644 --- a/packages/rs-drive/src/drive/tokens/balance/prove_identity_token_balances/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/balance/prove_identity_token_balances/v0/mod.rs @@ -74,7 +74,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -86,6 +85,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + 
updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([ ( @@ -233,7 +238,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -245,6 +249,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, @@ -315,7 +325,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -327,6 +336,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, diff --git a/packages/rs-drive/src/drive/tokens/distribution/add_perpetual_distribution/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/add_perpetual_distribution/v0/mod.rs index 7cefe3e95fc..b70d752e55f 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/add_perpetual_distribution/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/add_perpetual_distribution/v0/mod.rs @@ -1,19 +1,22 @@ use crate::drive::tokens::paths::{ - token_root_perpetual_distributions_path_vec, TokenPerpetualDistributionPaths, - TOKEN_PERPETUAL_DISTRIBUTIONS_KEY, + token_perpetual_distributions_path_vec, token_root_perpetual_distributions_path_vec, + TokenPerpetualDistributionPaths, TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY, + TOKEN_PERPETUAL_DISTRIBUTIONS_INFO_KEY, TOKEN_PERPETUAL_DISTRIBUTIONS_KEY, }; use 
crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; use crate::fees::op::LowLevelDriveOperation; -use crate::util::grove_operations::{BatchInsertApplyType, QueryTarget}; -use crate::util::object_size_info::PathKeyElementInfo; +use crate::util::grove_operations::BatchInsertTreeApplyType; +use crate::util::object_size_info::{PathKeyElementInfo, PathKeyInfo}; use crate::util::storage_flags::StorageFlags; use dpp::block::block_info::BlockInfo; use dpp::data_contract::associated_token::token_distribution_key::{ - DistributionType, TokenDistributionKey, + TokenDistributionKey, TokenDistributionType, +}; +use dpp::data_contract::associated_token::token_perpetual_distribution::methods::v0::{ + TokenPerpetualDistributionV0Accessors, TokenPerpetualDistributionV0Methods, }; -use dpp::data_contract::associated_token::token_perpetual_distribution::methods::v0::TokenPerpetualDistributionV0Accessors; use dpp::data_contract::associated_token::token_perpetual_distribution::TokenPerpetualDistribution; use dpp::serialization::PlatformSerializable; use dpp::version::PlatformVersion; @@ -37,38 +40,39 @@ impl Drive { transaction: TransactionArg, platform_version: &PlatformVersion, ) -> Result<(), Error> { - if estimated_costs_only_with_layer_info.is_some() { - // Drive::add_estimation_costs_for_perpetual_distribution( - // estimated_costs_only_with_layer_info, - // &platform_version.drive, - // )?; + if let Some(estimated_costs_only_with_layer_info) = estimated_costs_only_with_layer_info { + Drive::add_estimation_costs_for_token_perpetual_distribution( + Some(token_id), + estimated_costs_only_with_layer_info, + &platform_version.drive, + )?; } let serialized_distribution = distribution.serialize_to_bytes()?; // Storage flags for cleanup logic let storage_flags = StorageFlags::new_single_epoch(block_info.epoch.index, Some(owner_id)); - // Path for the perpetual distribution tree - let perpetual_distributions_path = 
token_root_perpetual_distributions_path_vec(); + let root_perpetual_distributions_path = token_root_perpetual_distributions_path_vec(); + + let perpetual_distributions_path = token_perpetual_distributions_path_vec(token_id); - let insert_type = if estimated_costs_only_with_layer_info.is_none() { - BatchInsertApplyType::StatefulBatchInsert + let tree_apply_type = if estimated_costs_only_with_layer_info.is_none() { + BatchInsertTreeApplyType::StatefulBatchInsertTree } else { - BatchInsertApplyType::StatelessBatchInsert { + BatchInsertTreeApplyType::StatelessBatchInsertTree { in_tree_type: TreeType::NormalTree, - target: QueryTarget::QueryTargetValue(serialized_distribution.len() as u32), + tree_type: TreeType::NormalTree, + flags_len: 0, } }; - // We do a `if_not_exists` just to be extra careful - let inserted = self.batch_insert_if_not_exists( - PathKeyElementInfo::<0>::PathKeyElement(( - perpetual_distributions_path, - token_id.to_vec(), - Element::new_item(serialized_distribution), - )), - insert_type, + let inserted = self.batch_insert_empty_tree_if_not_exists( + PathKeyInfo::<0>::PathKey((root_perpetual_distributions_path, token_id.to_vec())), + TreeType::NormalTree, + None, + tree_apply_type, transaction, + &mut None, batch_operations, &platform_version.drive, )?; @@ -77,14 +81,36 @@ impl Drive { return Err(Error::Drive(DriveError::CorruptedCodeExecution("we can not insert the perpetual distribution as it already existed, this should have been validated before insertion"))); } + self.batch_insert( + PathKeyElementInfo::<0>::PathKeyElement(( + perpetual_distributions_path.clone(), + vec![TOKEN_PERPETUAL_DISTRIBUTIONS_INFO_KEY], + Element::new_item(serialized_distribution), + )), + batch_operations, + &platform_version.drive, + )?; + + let next_interval = distribution.next_interval(block_info); + + self.batch_insert( + PathKeyElementInfo::<0>::PathKeyElement(( + perpetual_distributions_path, + vec![TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY], + 
Element::new_item(next_interval.to_be_bytes().to_vec()), + )), + batch_operations, + &platform_version.drive, + )?; + // We will distribute for the first time on the next interval let distribution_path_for_next_interval = - distribution.distribution_path_for_next_interval(block_info); + distribution.distribution_path_for_next_interval_from_block_info(block_info); let distribution_key = TokenDistributionKey { token_id: token_id.into(), recipient: distribution.distribution_recipient(), - distribution_type: DistributionType::Perpetual, + distribution_type: TokenDistributionType::Perpetual, }; let serialized_key = distribution_key.serialize_consume_to_bytes()?; diff --git a/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/mod.rs index 1c7413d0f5e..f40ff71c2d3 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/mod.rs @@ -37,6 +37,102 @@ impl Drive { /// - Uses reference paths to map distributions to their corresponding execution times. /// - Prevents overflow errors by ensuring token amounts do not exceed `i64::MAX`. /// + /// # Tree structure + /// ```text + /// + /// [ROOT: Tokens] + /// │ + /// ┌───────────────────┴───────────────────┐ + /// │ │ + /// [TOKEN_STATUS_INFO_KEY] [TOKEN_DISTRIBUTIONS_KEY] + /// │ + /// ├─────────────────────────┐ + /// │ │ + /// [TOKEN_TIMED_DISTRIBUTIONS_KEY] [TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY] + /// │ │ + /// │ │ + /// ┌────────────┴────────────┐ │ + /// │ │ │ + /// [TOKEN_MS_TIMED_DISTRIBUTIONS_KEY] ... (other timed trees) │ + /// │ │ + /// │ For each token (token_id) + /// │ │ + /// │ ┌──────────┴──────────┐ + /// │ │ │ + /// │ token_id (e.g., TKN) ... 
+ /// │ │ + /// │ │ + /// │ For each distribution time: + /// │ │ + /// │ ┌───────┴───────┐ + /// │ │ time (ts) │ <-- Key: timestamp (8 bytes) + /// │ └───────┬───────┘ + /// │ │ + /// │ [ Sum Tree: Recipient → Amount ] + /// │ │ + /// │ ┌────────────────┴────────────────┐ + /// │ │ │ │ + /// │ recipient A -> amount recipient B -> amount, etc. <──────┐ + /// │ │ + /// └──────────────────────────────────────────────────── │ + /// (Separate branch) │ + /// └─ In the TIMED DISTRIBUTIONS branch: │ + /// For each time: │ + /// ┌─────────────┐ │ + /// │ time (ts) │ <-- Key: timestamp (8 bytes) │ + /// └─────┬───────┘ │ + /// │ │ + /// [ Reference Tree: Serialized Distribution Keys ] ────────┘ + /// ``` + + /// # Explanation of Each Layer + /// ```text + /// 1. Root Level (Tokens): + /// • The top-level of the tree corresponds to the tokens in the system. The path starts with the root identifier for tokens. + /// 2. Distributions Branch (TOKEN_DISTRIBUTIONS_KEY): + /// • Under the Tokens root, there is a branch reserved for token distributions. + /// • This branch holds several subtrees for different kinds of distribution data. + /// 3. Timed vs. Pre-Programmed Distribution Subtrees: + /// • Timed Distributions: + /// • Located under TOKEN_TIMED_DISTRIBUTIONS_KEY, these trees help organize distributions by time for features like verifying the exact moment a distribution was made. + /// • For example, the Millisecond Timed Distributions branch (TOKEN_MS_TIMED_DISTRIBUTIONS_KEY) contains nodes for each timestamp. + /// • Pre-Programmed Distributions: + /// • Located under TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, this branch stores pre-programmed distribution data. + /// • For each token, a subtree is created using the token’s identifier. + /// • Under that, each distribution time (converted to an 8‑byte big‑endian key) gets its own node (a sum tree). + /// 4. 
Inside Each Pre-Programmed Time Node: + /// • Each node for a specific timestamp is a sum tree that holds key–value pairs where: + /// • Key: The recipient’s identifier (the person or entity receiving tokens). + /// • Value: The token amount (stored as a sum item). + /// • This is the core data for a pre-programmed distribution at a specific time. + /// 5. Reference Insertion in Timed Distributions: + /// • In parallel, a reference is inserted into the Millisecond Timed Distributions branch for the same timestamp. + /// • This reference links back to the pre-programmed distribution data. It uses a TokenDistributionKey (which includes the token ID, recipient, and distribution type) to serialize and store a reference. + /// • This reference is stored in a subtree keyed by the timestamp (again, 8 bytes). + /// ``` + + /// # What the Function Does + /// ```text + /// 1. Insert the Pre-Programmed Distributions Tree: + /// • First, the function ensures that the subtree for pre‑programmed distributions for the token exists. + /// • It uses the fixed path: + /// [RootTree::Tokens, TOKEN_DISTRIBUTIONS_KEY, TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, token_id]. + /// 2. For Each Distribution Time: + /// • The function iterates over each distribution time (from the provided distribution data). + /// • It inserts an empty sum tree for that time into the pre‑programmed subtree using the timestamp as a key. + /// • It then creates a corresponding entry in the millisecond timed distributions branch. + /// This involves: + /// • Inserting an empty tree if necessary, with storage flags. + /// • Creating a reference path using the distribution key and additional metadata. + /// • Finally, for each recipient at that timestamp, it inserts: + /// • A sum item into the pre‑programmed distribution tree with the recipient’s identifier and the token amount. + /// • A reference into the timed distributions tree that points back to the pre‑programmed entry. + /// 3. Why This Structure? 
+ /// • This hierarchical tree structure allows for efficient queries and proofs. + /// • You can query by token, then by time, and then by recipient, and also verify that distributions were made at specific times. + /// • The reference links between the pre-programmed and timed distributions trees help verify the ordering and correctness of distribution events. + /// ``` + /// /// # Returns /// - `Ok(())` if the distributions are successfully added. /// - `Err(Error::Drive(DriveError::UnknownVersionMismatch))` if an unsupported platform version diff --git a/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/v0/mod.rs index 800e7385b89..8423dbc4de9 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/add_pre_programmed_distribution/v0/mod.rs @@ -12,7 +12,7 @@ use crate::util::object_size_info::{DriveKeyInfo, PathInfo, PathKeyElementInfo}; use crate::util::storage_flags::StorageFlags; use dpp::block::block_info::BlockInfo; use dpp::data_contract::associated_token::token_distribution_key::{ - DistributionType, TokenDistributionKey, + TokenDistributionKey, TokenDistributionType, }; use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; use dpp::data_contract::associated_token::token_pre_programmed_distribution::methods::v0::TokenPreProgrammedDistributionV0Methods; @@ -25,6 +25,94 @@ use grovedb::reference_path::ReferencePathType; use grovedb::{Element, EstimatedLayerInformation, TransactionArg, TreeType}; use std::collections::HashMap; +// [ROOT: Tokens] +// │ +// ┌───────────────────┴───────────────────┐ +// │ │ +// [TOKEN_STATUS_INFO_KEY] [TOKEN_DISTRIBUTIONS_KEY] +// │ +// ├─────────────────────────┐ +// │ │ +// [TOKEN_TIMED_DISTRIBUTIONS_KEY] 
[TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY] +// │ │ +// │ │ +// ┌────────────┴────────────┐ │ +// │ │ │ +// [TOKEN_MS_TIMED_DISTRIBUTIONS_KEY] ... (other timed trees) │ +// │ │ +// │ For each token (token_id) +// │ │ +// │ ┌──────────┴──────────┐ +// │ │ │ +// │ token_id (e.g., TKN) ... +// │ │ +// │ │ +// │ For each distribution time: +// │ │ +// │ ┌───────┴───────┐ +// │ │ time (ts) │ <-- Key: timestamp (8 bytes) +// │ └───────┬───────┘ +// │ │ +// │ [ Sum Tree: Recipient → Amount ] +// │ │ +// │ ┌────────────────┴────────────────┐ +// │ │ │ │ +// │ recipient A -> amount recipient B -> amount, etc. <──────┐ +// │ │ +// └──────────────────────────────────────────────────── │ +// (Separate branch) │ +// └─ In the TIMED DISTRIBUTIONS branch: │ +// For each time: │ +// ┌─────────────┐ │ +// │ time (ts) │ <-- Key: timestamp (8 bytes) │ +// └─────┬───────┘ │ +// │ │ +// [ Reference Tree: Serialized Distribution Keys ] ────────┘ + +// Explanation of Each Layer +// 1. Root Level (Tokens): +// • The top-level of the tree corresponds to the tokens in the system. The path starts with the root identifier for tokens. +// 2. Distributions Branch (TOKEN_DISTRIBUTIONS_KEY): +// • Under the Tokens root, there is a branch reserved for token distributions. +// • This branch holds several subtrees for different kinds of distribution data. +// 3. Timed vs. Pre-Programmed Distribution Subtrees: +// • Timed Distributions: +// • Located under TOKEN_TIMED_DISTRIBUTIONS_KEY, these trees help organize distributions by time for features like verifying the exact moment a distribution was made. +// • For example, the Millisecond Timed Distributions branch (TOKEN_MS_TIMED_DISTRIBUTIONS_KEY) contains nodes for each timestamp. +// • Pre-Programmed Distributions: +// • Located under TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, this branch stores pre-programmed distribution data. +// • For each token, a subtree is created using the token’s identifier. 
+// • Under that, each distribution time (converted to an 8‑byte big‑endian key) gets its own node (a sum tree). +// 4. Inside Each Pre-Programmed Time Node: +// • Each node for a specific timestamp is a sum tree that holds key–value pairs where: +// • Key: The recipient’s identifier (the person or entity receiving tokens). +// • Value: The token amount (stored as a sum item). +// • This is the core data for a pre-programmed distribution at a specific time. +// 5. Reference Insertion in Timed Distributions: +// • In parallel, a reference is inserted into the Millisecond Timed Distributions branch for the same timestamp. +// • This reference links back to the pre-programmed distribution data. It uses a TokenDistributionKey (which includes the token ID, recipient, and distribution type) to serialize and store a reference. +// • This reference is stored in a subtree keyed by the timestamp (again, 8 bytes). + +// What the Function Does +// 1. Insert the Pre-Programmed Distributions Tree: +// • First, the function ensures that the subtree for pre‑programmed distributions for the token exists. +// • It uses the fixed path: +// [RootTree::Tokens, TOKEN_DISTRIBUTIONS_KEY, TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, token_id]. +// 2. For Each Distribution Time: +// • The function iterates over each distribution time (from the provided distribution data). +// • It inserts an empty sum tree for that time into the pre‑programmed subtree using the timestamp as a key. +// • It then creates a corresponding entry in the millisecond timed distributions branch. +// This involves: +// • Inserting an empty tree if necessary, with storage flags. +// • Creating a reference path using the distribution key and additional metadata. +// • Finally, for each recipient at that timestamp, it inserts: +// • A sum item into the pre‑programmed distribution tree with the recipient’s identifier and the token amount. 
+// • A reference into the timed distributions tree that points back to the pre‑programmed entry. +// 3. Why This Structure? +// • This hierarchical tree structure allows for efficient queries and proofs. +// • You can query by token, then by time, and then by recipient, and also verify that distributions were made at specific times. +// • The reference links between the pre-programmed and timed distributions trees help verify the ordering and correctness of distribution events. + impl Drive { /// Version 0 of `add_perpetual_distribution` pub(super) fn add_pre_programmed_distributions_v0( @@ -151,7 +239,7 @@ impl Drive { let distribution_key = TokenDistributionKey { token_id: token_id.into(), recipient: TokenDistributionRecipient::Identity(*recipient), - distribution_type: DistributionType::PreProgrammed, + distribution_type: TokenDistributionType::PreProgrammed, }; let serialized_key = distribution_key.serialize_consume_to_bytes()?; diff --git a/packages/rs-drive/src/drive/tokens/distribution/fetch/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/fetch/mod.rs index 9396b4148ce..c5158306db1 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/fetch/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/fetch/mod.rs @@ -1,104 +1,2 @@ +mod perpetual_distribution_last_paid_moment; mod pre_programmed_distributions; - -use crate::drive::tokens::distribution::queries::QueryPreProgrammedDistributionStartAt; -use crate::drive::Drive; -use crate::error::drive::DriveError; -use crate::error::Error; -use crate::fees::op::LowLevelDriveOperation; -use dpp::balances::credits::TokenAmount; -use dpp::prelude::{Identifier, TimestampMillis}; -use dpp::version::PlatformVersion; -use grovedb::TransactionArg; -use std::collections::BTreeMap; - -impl Drive { - /// Fetches the pre‑programmed distributions for a token, using the appropriate versioned method. 
- /// - /// This method queries the pre‑programmed distributions tree at the path - /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: - /// - /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. - /// - **Inner keys:** Recipient identifiers (`Identifier`). - /// - **Values:** Token amounts (`TokenAmount`). - /// - /// The method dispatches to the correct versioned implementation based on the `platform_version`. - /// - /// # Parameters - /// - /// - `token_id`: The 32‑byte identifier for the token. - /// - `transaction`: The current GroveDB transaction. - /// - `platform_version`: The platform version to determine the method variant. - /// - /// # Returns - /// - /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. - pub fn fetch_token_pre_programmed_distributions( - &self, - token_id: [u8; 32], - start_at: Option, - limit: Option, - transaction: TransactionArg, - platform_version: &PlatformVersion, - ) -> Result>, Error> { - self.fetch_token_pre_programmed_distributions_operations( - token_id, - start_at, - limit, - &mut vec![], - transaction, - platform_version, - ) - } - /// Fetches the pre‑programmed distributions for a token, using the appropriate versioned method. - /// - /// This method queries the pre‑programmed distributions tree at the path - /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: - /// - /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. - /// - **Inner keys:** Recipient identifiers (`Identifier`). - /// - **Values:** Token amounts (`TokenAmount`). - /// - /// The method dispatches to the correct versioned implementation based on the `platform_version`. - /// - /// # Parameters - /// - /// - `token_id`: The 32‑byte identifier for the token. - /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. 
- /// - `transaction`: The current GroveDB transaction. - /// - `platform_version`: The platform version to determine the method variant. - /// - /// # Returns - /// - /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. - pub(crate) fn fetch_token_pre_programmed_distributions_operations( - &self, - token_id: [u8; 32], - start_at: Option, - limit: Option, - drive_operations: &mut Vec, - transaction: TransactionArg, - platform_version: &PlatformVersion, - ) -> Result>, Error> { - match platform_version - .drive - .methods - .token - .fetch - .pre_programmed_distributions - { - 0 => self.fetch_token_pre_programmed_distributions_operations_v0( - token_id, - start_at, - limit, - drive_operations, - transaction, - platform_version, - ), - version => Err(Error::Drive(DriveError::UnknownVersionMismatch { - method: "fetch_token_pre_programmed_distributions_operations".to_string(), - known_versions: vec![0], - received: version, - })), - } - } -} diff --git a/packages/rs-drive/src/drive/tokens/distribution/fetch/perpetual_distribution_last_paid_moment/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/fetch/perpetual_distribution_last_paid_moment/mod.rs new file mode 100644 index 00000000000..23e94e8b071 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/fetch/perpetual_distribution_last_paid_moment/mod.rs @@ -0,0 +1,98 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::prelude::Identifier; +use dpp::version::PlatformVersion; +use grovedb::TransactionArg; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; + +impl Drive { + /// Fetches the last paid timestamp for a perpetual distribution for a given identity, + /// using 
the appropriate versioned method. + /// + /// This method queries the perpetual distributions tree at the path + /// `perpetual_distribution_last_paid_time_path_vec(token_id, identity_id)`. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `identity_id`: The identifier of the identity whose last paid time is being queried. + /// - `distribution_type`: The distribution type known from the Token configuration. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. + /// + /// # Returns + /// + /// A `Result` containing the last paid `RewardDistributionMoment` on success or an `Error` on failure. + pub fn fetch_perpetual_distribution_last_paid_moment( + &self, + token_id: [u8; 32], + identity_id: Identifier, + distribution_type: &RewardDistributionType, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + self.fetch_perpetual_distribution_last_paid_moment_operations( + token_id, + identity_id, + distribution_type, + &mut vec![], + transaction, + platform_version, + ) + } + + /// Fetches the last paid timestamp for a perpetual distribution for a given identity, + /// using the appropriate versioned method. + /// + /// This method queries the perpetual distributions tree at the path + /// `perpetual_distribution_last_paid_time_path_vec(token_id, identity_id)`. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `identity_id`: The identifier of the identity whose last paid time is being queried. + /// - `distribution_type`: The distribution type known from the Token configuration. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. 
+ /// + /// # Returns + /// + /// A `Result` containing the last paid `RewardDistributionMoment` on success or an `Error` on failure. + pub(crate) fn fetch_perpetual_distribution_last_paid_moment_operations( + &self, + token_id: [u8; 32], + identity_id: Identifier, + distribution_type: &RewardDistributionType, + drive_operations: &mut Vec, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version + .drive + .methods + .token + .fetch + .perpetual_distribution_last_paid_time + { + 0 => self.fetch_perpetual_distribution_last_paid_moment_operations_v0( + token_id, + identity_id, + distribution_type, + drive_operations, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "fetch_perpetual_distribution_last_paid_moment_operations".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/fetch/perpetual_distribution_last_paid_moment/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/fetch/perpetual_distribution_last_paid_moment/v0/mod.rs new file mode 100644 index 00000000000..a7d86816cb0 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/fetch/perpetual_distribution_last_paid_moment/v0/mod.rs @@ -0,0 +1,74 @@ +use crate::drive::tokens::paths::{token_perpetual_distributions_identity_last_claimed_time_path}; +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use crate::util::grove_operations::DirectQueryType; +use dpp::identifier::Identifier; +use dpp::version::PlatformVersion; +use grovedb::Element::Item; +use grovedb::TransactionArg; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use 
dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; + +impl Drive { + /// Fetches the last paid timestamp for a perpetual distribution for a given identity. + /// + /// This method queries the `token_perpetual_distributions_path_vec(token_id)` tree and + /// retrieves the last recorded payment timestamp (`TimestampMillis`) associated with + /// `identity_id`. The timestamp is expected to be stored as an 8-byte big-endian value. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `identity_id`: The identifier of the identity whose last paid time is being queried. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. + /// + /// # Returns + /// + /// A `Result` containing `Some(RewardDistributionMoment)` if a record exists, `None` if no record is found, + /// or an `Error` if retrieval fails. 
+ pub(super) fn fetch_perpetual_distribution_last_paid_moment_operations_v0( + &self, + token_id: [u8; 32], + identity_id: Identifier, + distribution_type: &RewardDistributionType, + drive_operations: &mut Vec, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let direct_query_type = DirectQueryType::StatefulDirectQuery; + + let perpetual_distributions_path = + token_perpetual_distributions_identity_last_claimed_time_path(&token_id); + + match self.grove_get_raw_optional( + (&perpetual_distributions_path).into(), + identity_id.as_slice(), + direct_query_type, + transaction, + drive_operations, + &platform_version.drive, + ) { + Ok(Some(Item(value, _))) => { + let moment = distribution_type.moment_from_bytes(&value).map_err(|e| { + Error::Drive(DriveError::CorruptedDriveState(format!( + "Moment should be specific amount of bytes: {}", + e + ))) + })?; + Ok(Some(moment)) + } + + Ok(None) | Err(Error::GroveDB(grovedb::Error::PathKeyNotFound(_))) => Ok(None), + + Ok(Some(_)) => Err(Error::Drive(DriveError::CorruptedElementType( + "Last moment was present but was not an item", + ))), + + Err(e) => Err(e), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/mod.rs index 1b642c6eaf4..50e1d5ddf4f 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/mod.rs @@ -1,41 +1,77 @@ -use crate::drive::tokens::distribution::queries::{ - pre_programmed_distributions_query, QueryPreProgrammedDistributionStartAt, -}; +mod v0; + +use crate::drive::tokens::distribution::queries::QueryPreProgrammedDistributionStartAt; use crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; use crate::fees::op::LowLevelDriveOperation; use 
dpp::balances::credits::TokenAmount; -use dpp::identifier::Identifier; -use dpp::identity::TimestampMillis; -use grovedb::query_result_type::{QueryResultElement, QueryResultType}; +use dpp::prelude::Identifier; +use dpp::version::PlatformVersion; use grovedb::TransactionArg; -use platform_version::version::PlatformVersion; use std::collections::BTreeMap; impl Drive { - /// Fetches the pre‑programmed distributions for a token. + /// Fetches the pre‑programmed distributions for a token, using the appropriate versioned method. + /// + /// This method queries the pre‑programmed distributions tree at the path + /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: /// - /// This method queries the backing store for the pre‑programmed distributions tree at the path - /// defined by `token_pre_programmed_distributions_path_vec(token_id)`. It then extracts a nested - /// mapping where: + /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. + /// - **Inner keys:** Recipient identifiers (`Identifier`). + /// - **Values:** Token amounts (`TokenAmount`). /// - /// - **Outer keys:** Are timestamps (`TimestampMillis`) representing each distribution time, - /// extracted from the 5th path component (index 4). The time is expected to be stored as 4 bytes in big‑endian. - /// - **Inner keys:** Are recipient identifiers (`Identifier`) derived from the query key. - /// - **Values:** Are token amounts (`TokenAmount`), extracted from elements that are sum items. + /// The method dispatches to the correct versioned implementation based on the `platform_version`. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. + /// + /// # Returns + /// + /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. 
+ pub fn fetch_token_pre_programmed_distributions( + &self, + token_id: [u8; 32], + start_at: Option, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result>, Error> + { + self.fetch_token_pre_programmed_distributions_operations( + token_id, + start_at, + limit, + &mut vec![], + transaction, + platform_version, + ) + } + /// Fetches the pre‑programmed distributions for a token, using the appropriate versioned method. + /// + /// This method queries the pre‑programmed distributions tree at the path + /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: + /// + /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. + /// - **Inner keys:** Recipient identifiers (`Identifier`). + /// - **Values:** Token amounts (`TokenAmount`). + /// + /// The method dispatches to the correct versioned implementation based on the `platform_version`. /// /// # Parameters /// /// - `token_id`: The 32‑byte identifier for the token. /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. /// - `transaction`: The current GroveDB transaction. - /// - `platform_version`: The platform version to use. + /// - `platform_version`: The platform version to determine the method variant. /// /// # Returns /// /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. 
- pub(super) fn fetch_token_pre_programmed_distributions_operations_v0( + pub(crate) fn fetch_token_pre_programmed_distributions_operations( &self, token_id: [u8; 32], start_at: Option, @@ -43,47 +79,28 @@ impl Drive { drive_operations: &mut Vec, transaction: TransactionArg, platform_version: &PlatformVersion, - ) -> Result>, Error> { - let path_query = pre_programmed_distributions_query(token_id, start_at, limit); - - let results = self - .grove_get_raw_path_query( - &path_query, - transaction, - QueryResultType::QueryPathKeyElementTrioResultType, + ) -> Result>, Error> + { + match platform_version + .drive + .methods + .token + .fetch + .pre_programmed_distributions + { + 0 => self.fetch_token_pre_programmed_distributions_operations_v0( + token_id, + start_at, + limit, drive_operations, - &platform_version.drive, - )? - .0; - - let mut map: BTreeMap> = BTreeMap::new(); - - for result_item in results.elements.into_iter() { - if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, element)) = - result_item - { - if let Some(last) = path.pop() { - if last.len() != 8 { - return Err(Error::Drive(DriveError::CorruptedDriveState( - format!("time key in pre-programmed distributions is not 8 bytes, got {} bytes instead", last.len()), - ))); - } - let mut time_bytes = [0u8; 8]; - time_bytes.copy_from_slice(last.as_slice()); - let time = TimestampMillis::from_be_bytes(time_bytes); - let recipient = Identifier::from_bytes(key.as_slice())?; - let sum_item = element.as_sum_item_value()?; - if sum_item < 0 { - return Err(Error::Drive(DriveError::CorruptedDriveState( - "negative token amount in pre-programmed distribution".to_string(), - ))); - } - let token_amount: TokenAmount = sum_item as TokenAmount; - map.entry(time).or_default().insert(recipient, token_amount); - } - } + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "fetch_token_pre_programmed_distributions_operations".to_string(), + 
known_versions: vec![0], + received: version, + })), } - - Ok(map) } } diff --git a/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/v0/mod.rs new file mode 100644 index 00000000000..1b642c6eaf4 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/fetch/pre_programmed_distributions/v0/mod.rs @@ -0,0 +1,89 @@ +use crate::drive::tokens::distribution::queries::{ + pre_programmed_distributions_query, QueryPreProgrammedDistributionStartAt, +}; +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::balances::credits::TokenAmount; +use dpp::identifier::Identifier; +use dpp::identity::TimestampMillis; +use grovedb::query_result_type::{QueryResultElement, QueryResultType}; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; +use std::collections::BTreeMap; + +impl Drive { + /// Fetches the pre‑programmed distributions for a token. + /// + /// This method queries the backing store for the pre‑programmed distributions tree at the path + /// defined by `token_pre_programmed_distributions_path_vec(token_id)`. It then extracts a nested + /// mapping where: + /// + /// - **Outer keys:** Are timestamps (`TimestampMillis`) representing each distribution time, + /// extracted from the 5th path component (index 4). The time is expected to be stored as 4 bytes in big‑endian. + /// - **Inner keys:** Are recipient identifiers (`Identifier`) derived from the query key. + /// - **Values:** Are token amounts (`TokenAmount`), extracted from elements that are sum items. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. 
+ /// - `platform_version`: The platform version to use. + /// + /// # Returns + /// + /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. + pub(super) fn fetch_token_pre_programmed_distributions_operations_v0( + &self, + token_id: [u8; 32], + start_at: Option, + limit: Option, + drive_operations: &mut Vec, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result>, Error> { + let path_query = pre_programmed_distributions_query(token_id, start_at, limit); + + let results = self + .grove_get_raw_path_query( + &path_query, + transaction, + QueryResultType::QueryPathKeyElementTrioResultType, + drive_operations, + &platform_version.drive, + )? + .0; + + let mut map: BTreeMap> = BTreeMap::new(); + + for result_item in results.elements.into_iter() { + if let QueryResultElement::PathKeyElementTrioResultItem((mut path, key, element)) = + result_item + { + if let Some(last) = path.pop() { + if last.len() != 8 { + return Err(Error::Drive(DriveError::CorruptedDriveState( + format!("time key in pre-programmed distributions is not 8 bytes, got {} bytes instead", last.len()), + ))); + } + let mut time_bytes = [0u8; 8]; + time_bytes.copy_from_slice(last.as_slice()); + let time = TimestampMillis::from_be_bytes(time_bytes); + let recipient = Identifier::from_bytes(key.as_slice())?; + let sum_item = element.as_sum_item_value()?; + if sum_item < 0 { + return Err(Error::Drive(DriveError::CorruptedDriveState( + "negative token amount in pre-programmed distribution".to_string(), + ))); + } + let token_amount: TokenAmount = sum_item as TokenAmount; + map.entry(time).or_default().insert(recipient, token_amount); + } + } + } + + Ok(map) + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/mark_perpetual_release_as_distributed/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/mark_perpetual_release_as_distributed/mod.rs new file mode 100644 index 00000000000..86c8b28ecf7 --- /dev/null +++ 
b/packages/rs-drive/src/drive/tokens/distribution/mark_perpetual_release_as_distributed/mod.rs @@ -0,0 +1,86 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::block::block_info::BlockInfo; +use dpp::version::PlatformVersion; +use grovedb::batch::KeyInfoPath; +use grovedb::{EstimatedLayerInformation, TransactionArg}; +use std::collections::HashMap; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; + +impl Drive { + /// Marks a perpetual token release as distributed in the state tree. + /// + /// This function updates the perpetual distribution record by: + /// - Removing the previous distribution moment. + /// - Setting the new distribution moment. + /// - Associating the new distribution with the correct recipient. + /// + /// # Parameters + /// - `token_id`: The unique identifier of the token. + /// - `owner_id`: The unique identifier of the owner who initiated the distribution. + /// - `previous_moment`: The previous moment when the reward was last distributed. + /// - `next_moment`: The next moment when the reward should be distributed. + /// - `distribution_recipient`: The recipient of the distributed reward. + /// - `block_info`: Metadata about the current block, including epoch details. + /// - `estimated_costs_only_with_layer_info`: Optional storage layer information for cost estimation. + /// - `batch_operations`: A mutable reference to the batch operation queue. + /// - `transaction`: The transaction context. + /// - `platform_version`: The current platform version. + /// + /// # Returns + /// - `Ok(())` if the operation succeeds. + /// - `Err(Error::Drive(DriveError::UnknownVersionMismatch))` if an unsupported version is encountered. 
+ /// + /// # Behavior + /// - If `estimated_costs_only_with_layer_info` is `Some`, the function only estimates costs. + /// - The previous distribution entry is deleted from the tree. + /// - The new distribution entry is inserted with a reference to the corresponding recipient. + /// + /// # Versioning + /// - Uses version 0 of `mark_perpetual_release_as_distributed_operations_v0` if supported. + /// - Returns an error if an unknown version is received. + pub fn mark_perpetual_release_as_distributed_operations( + &self, + token_id: [u8; 32], + owner_id: [u8; 32], + previous_moment: RewardDistributionMoment, + next_moment: RewardDistributionMoment, + distribution_recipient: TokenDistributionRecipient, + block_info: &BlockInfo, + estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version + .drive + .methods + .token + .distribution + .mark_perpetual_release_as_distributed + { + 0 => self.mark_perpetual_release_as_distributed_operations_v0( + token_id, + owner_id, + previous_moment, + next_moment, + distribution_recipient, + block_info, + estimated_costs_only_with_layer_info, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "mark_perpetual_release_as_distributed".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/mark_perpetual_release_as_distributed/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/mark_perpetual_release_as_distributed/v0/mod.rs new file mode 100644 index 00000000000..91ee22b4282 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/mark_perpetual_release_as_distributed/v0/mod.rs @@ -0,0 +1,121 @@ +use crate::drive::tokens::paths::{token_perpetual_distributions_path_vec, TokenPerpetualDistributionMomentPaths, TOKEN_PERPETUAL_DISTRIBUTIONS_KEY, 
TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY}; +use crate::drive::Drive; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use crate::util::object_size_info::{PathKeyElementInfo}; +use crate::util::storage_flags::StorageFlags; +use dpp::block::block_info::BlockInfo; +use dpp::data_contract::associated_token::token_distribution_key::{ + TokenDistributionType, TokenDistributionKey, +}; +use dpp::serialization::PlatformSerializable; +use dpp::version::PlatformVersion; +use grovedb::batch::KeyInfoPath; +use grovedb::reference_path::ReferencePathType; +use grovedb::{Element, EstimatedLayerInformation, MaybeTree, TransactionArg, TreeType}; +use std::collections::HashMap; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::util::grove_operations::BatchDeleteApplyType::{StatefulBatchDelete, StatelessBatchDelete}; +use crate::util::type_constants::DEFAULT_HASH_SIZE_U32; + +impl Drive { + /// Version 0 of `mark_perpetual_release_as_distributed_v0` + pub(super) fn mark_perpetual_release_as_distributed_operations_v0( + &self, + token_id: [u8; 32], + owner_id: [u8; 32], + previous_moment: RewardDistributionMoment, + next_moment: RewardDistributionMoment, + distribution_recipient: TokenDistributionRecipient, + block_info: &BlockInfo, + estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let mut batch_operations = vec![]; + if let Some(estimated_costs_only_with_layer_info) = estimated_costs_only_with_layer_info { + Drive::add_estimation_costs_for_token_perpetual_distribution( + Some(token_id), + estimated_costs_only_with_layer_info, + &platform_version.drive, + )?; + } + + // Storage flags for cleanup logic + let storage_flags = 
StorageFlags::new_single_epoch(block_info.epoch.index, Some(owner_id)); + + let perpetual_distributions_path = token_perpetual_distributions_path_vec(token_id); + + self.batch_insert( + PathKeyElementInfo::<0>::PathKeyElement(( + perpetual_distributions_path, + vec![TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY], + Element::new_item(next_moment.to_be_bytes_vec()), + )), + &mut batch_operations, + &platform_version.drive, + )?; + + // We will distribute for the first time on the next interval + + let distribution_path_for_previous_moment = previous_moment.distribution_path(); + + let distribution_path_for_next_moment = next_moment.distribution_path(); + + let distribution_key = TokenDistributionKey { + token_id: token_id.into(), + recipient: distribution_recipient, + distribution_type: TokenDistributionType::Perpetual, + }; + + let serialized_distribution_key = distribution_key.serialize_consume_to_bytes()?; + + let remaining_reference = vec![vec![TOKEN_PERPETUAL_DISTRIBUTIONS_KEY], token_id.to_vec()]; + + let reference = ReferencePathType::UpstreamRootHeightReference(2, remaining_reference); + + let delete_apply_type = if estimated_costs_only_with_layer_info.is_some() { + StatelessBatchDelete { + in_tree_type: TreeType::NormalTree, + estimated_key_size: DEFAULT_HASH_SIZE_U32, + estimated_value_size: reference.serialized_size() as u32 + + storage_flags.serialized_size(), + } + } else { + // we know we are not deleting a subtree + StatefulBatchDelete { + is_known_to_be_subtree_with_sum: Some(MaybeTree::NotTree), + } + }; + + let new_element = + Element::new_reference_with_flags(reference, storage_flags.to_some_element_flags()); + + // We delete the old one + self.batch_delete( + distribution_path_for_previous_moment.as_slice().into(), + &serialized_distribution_key, + delete_apply_type, + transaction, + &mut batch_operations, + &platform_version.drive, + )?; + + // Now we add the new one + self.batch_insert( + PathKeyElementInfo::<0>::PathKeyElement(( + 
distribution_path_for_next_moment, + serialized_distribution_key, + new_element, + )), + &mut batch_operations, + &platform_version.drive, + )?; + + Ok(batch_operations) + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/mark_pre_programmed_release_as_distributed/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/mark_pre_programmed_release_as_distributed/mod.rs new file mode 100644 index 00000000000..4a4c6a92ad1 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/mark_pre_programmed_release_as_distributed/mod.rs @@ -0,0 +1,75 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::block::block_info::BlockInfo; +use dpp::prelude::TimestampMillis; +use dpp::version::PlatformVersion; +use grovedb::batch::KeyInfoPath; +use grovedb::{EstimatedLayerInformation, TransactionArg}; +use std::collections::HashMap; + +impl Drive { + /// Marks a pre‑programmed token release as distributed in the state tree. + /// + /// This function removes the scheduled pre‑programmed release (i.e. its reference) from the + /// distribution queue. In particular, it deletes the reference entry from the millisecond‑timed + /// distributions tree for the given token, release time, and identity. + /// + /// # Parameters + /// - `token_id`: The unique 32‑byte identifier of the token. + /// - `owner_id`: The unique 32‑byte identifier of the owner initiating the distribution. + /// - `identity_id`: The 32‑byte identity identifier for which the pre‑programmed release was scheduled. + /// - `release_time`: The scheduled release time (in milliseconds). + /// - `block_info`: Metadata about the current block, including epoch details. + /// - `estimated_costs_only_with_layer_info`: Optional storage layer information for cost estimation. + /// - `transaction`: The transaction context. + /// - `platform_version`: The current platform version. 
+ /// + /// # Returns + /// - `Ok(operations)` if the operation succeeds. + /// - `Err(Error::Drive(DriveError::UnknownVersionMismatch))` if an unsupported version is encountered. + /// + /// # Versioning + /// - Uses version 0 of `mark_pre_programmed_release_as_distributed_operations_v0` if supported. + /// - Returns an error if an unknown version is received. + pub fn mark_pre_programmed_release_as_distributed_operations( + &self, + token_id: [u8; 32], + owner_id: [u8; 32], + identity_id: [u8; 32], + release_time: TimestampMillis, + block_info: &BlockInfo, + estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version + .drive + .methods + .token + .distribution + .mark_pre_programmed_release_as_distributed + { + 0 => self.mark_pre_programmed_release_as_distributed_operations_v0( + token_id, + owner_id, + identity_id, + release_time, + block_info, + estimated_costs_only_with_layer_info, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "mark_pre_programmed_release_as_distributed".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/mark_pre_programmed_release_as_distributed/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/mark_pre_programmed_release_as_distributed/v0/mod.rs new file mode 100644 index 00000000000..2f20e852d8c --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/mark_pre_programmed_release_as_distributed/v0/mod.rs @@ -0,0 +1,127 @@ +use crate::drive::tokens::paths::{ + token_ms_timed_at_time_distributions_path_vec, TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY, +}; +use crate::drive::Drive; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use crate::util::grove_operations::BatchDeleteApplyType::{ + StatefulBatchDelete, 
StatelessBatchDelete, +}; +use crate::util::storage_flags::StorageFlags; +use crate::util::type_constants::DEFAULT_HASH_SIZE_U32; +use dpp::block::block_info::BlockInfo; +use dpp::data_contract::associated_token::token_distribution_key::{ + TokenDistributionKey, TokenDistributionType, +}; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; +use dpp::prelude::TimestampMillis; +use dpp::serialization::PlatformSerializable; +use dpp::version::PlatformVersion; +use grovedb::batch::KeyInfoPath; +use grovedb::reference_path::ReferencePathType; +use grovedb::{EstimatedLayerInformation, TransactionArg, TreeType}; +use std::collections::HashMap; + +/// Marks the pre-programmed release as distributed. +/// +/// This function “consumes” the scheduled pre‑programmed release for the given token and +/// recipient (identity). In practice, it deletes the reference from the queue (i.e. the +/// millisecond‑timed distributions tree) that was previously inserted when scheduling the +/// pre‑programmed distribution. +/// +/// # Parameters +/// - `token_id`: The 32‑byte token identifier. +/// - `owner_id`: The 32‑byte owner identifier (typically the caller). +/// - `identity_id`: The identity for which the pre‑programmed release was scheduled. +/// - `release_time`: The scheduled release time (as TimestampMillis, e.g. a 4‑byte value). +/// - `block_info`: Block info for the current state transition. +/// - `estimated_costs_only_with_layer_info`: Optional estimation info. +/// - `transaction`: The GroveDB transaction argument. +/// - `platform_version`: The current platform version. +/// +/// # Returns +/// A vector of low‑level drive operations that, when applied, remove the pre‑programmed release +/// from the queue. +/// +/// # Errors +/// Returns an error if serialization fails or if the underlying batch deletion fails. 
+impl Drive { + pub(super) fn mark_pre_programmed_release_as_distributed_operations_v0( + &self, + token_id: [u8; 32], + owner_id: [u8; 32], + identity_id: [u8; 32], + release_time: TimestampMillis, // TimestampMillis represented as a 32-bit unsigned integer + block_info: &BlockInfo, + estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + if let Some(estimated_costs_only_with_layer_info) = estimated_costs_only_with_layer_info { + Drive::add_estimation_costs_for_root_token_ms_interval_distribution( + [&release_time], + estimated_costs_only_with_layer_info, + &platform_version.drive, + )?; + } + // Initialize an empty batch of operations + let mut batch_operations = vec![]; + + // Create storage flags for cleanup logic; these flags are attached to inserted elements. + let storage_flags = StorageFlags::new_single_epoch(block_info.epoch.index, Some(owner_id)); + + // The pre-programmed distribution was scheduled by inserting a reference in the + // millisecond-timed distributions tree at a key corresponding to the release time. + let ms_time_at_time_distribution_path = + token_ms_timed_at_time_distributions_path_vec(release_time); + + // Build the distribution key used when the pre-programmed release was scheduled. + let distribution_key = TokenDistributionKey { + token_id: token_id.into(), + recipient: TokenDistributionRecipient::Identity(identity_id.into()), + distribution_type: TokenDistributionType::PreProgrammed, + }; + + // Serialize the distribution key to obtain the key used in the reference tree. 
+ let serialized_distribution_key = distribution_key.serialize_consume_to_bytes()?; + + // When scheduling, the reference was created using a “remaining reference” vector: + let remaining_reference = vec![ + vec![TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY], + token_id.to_vec(), + release_time.to_be_bytes().to_vec(), + identity_id.to_vec(), + ]; + + let reference = ReferencePathType::UpstreamRootHeightReference(2, remaining_reference); + + // Choose a delete apply type. If we are only estimating costs, use a stateless delete; + // otherwise, a stateful delete. + let delete_apply_type = if estimated_costs_only_with_layer_info.is_some() { + StatelessBatchDelete { + in_tree_type: TreeType::NormalTree, + estimated_key_size: DEFAULT_HASH_SIZE_U32, + estimated_value_size: reference.serialized_size() as u32 + + storage_flags.serialized_size(), + } + } else { + StatefulBatchDelete { + is_known_to_be_subtree_with_sum: Some(grovedb::MaybeTree::NotTree), + } + }; + + // Delete the reference from the millisecond-timed distributions tree. 
+ self.batch_delete( + ms_time_at_time_distribution_path.as_slice().into(), + &serialized_distribution_key, + delete_apply_type, + transaction, + &mut batch_operations, + &platform_version.drive, + )?; + + Ok(batch_operations) + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/mod.rs index 7fd9a456813..c3c9e07a7bc 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/mod.rs @@ -4,8 +4,13 @@ mod add_perpetual_distribution; mod add_pre_programmed_distribution; #[cfg(feature = "server")] mod fetch; - +#[cfg(feature = "server")] +mod mark_perpetual_release_as_distributed; +#[cfg(feature = "server")] +mod mark_pre_programmed_release_as_distributed; #[cfg(feature = "server")] mod prove; /// Token distribution queries pub mod queries; +#[cfg(feature = "server")] +mod set_perpetual_distribution_next_event_for_identity_id; diff --git a/packages/rs-drive/src/drive/tokens/distribution/prove/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/prove/mod.rs index a4a6bed83b4..3b7679cd7b3 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/prove/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/prove/mod.rs @@ -1,101 +1 @@ mod pre_programmed_distributions; - -use crate::drive::tokens::distribution::queries::QueryPreProgrammedDistributionStartAt; -use crate::drive::Drive; -use crate::error::drive::DriveError; -use crate::error::Error; -use crate::fees::op::LowLevelDriveOperation; -use dpp::version::PlatformVersion; -use grovedb::TransactionArg; - -impl Drive { - /// Proves the pre‑programmed distributions for a token, using the appropriate versioned method. - /// - /// This method queries the pre‑programmed distributions tree at the path - /// `token_pre_programmed_distributions_path_vec(token_id)`. 
It constructs a nested mapping where: - /// - /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. - /// - **Inner keys:** Recipient identifiers (`Identifier`). - /// - **Values:** Token amounts (`TokenAmount`). - /// - /// The method dispatches to the correct versioned implementation based on the `platform_version`. - /// - /// # Parameters - /// - /// - `token_id`: The 32‑byte identifier for the token. - /// - `transaction`: The current GroveDB transaction. - /// - `platform_version`: The platform version to determine the method variant. - /// - /// # Returns - /// - /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. - pub fn prove_token_pre_programmed_distributions( - &self, - token_id: [u8; 32], - start_at: Option, - limit: Option, - transaction: TransactionArg, - platform_version: &PlatformVersion, - ) -> Result, Error> { - self.prove_token_pre_programmed_distributions_operations( - token_id, - start_at, - limit, - &mut vec![], - transaction, - platform_version, - ) - } - /// Proves the pre‑programmed distributions for a token, using the appropriate versioned method. - /// - /// This method queries the pre‑programmed distributions tree at the path - /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: - /// - /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. - /// - **Inner keys:** Recipient identifiers (`Identifier`). - /// - **Values:** Token amounts (`TokenAmount`). - /// - /// The method dispatches to the correct versioned implementation based on the `platform_version`. - /// - /// # Parameters - /// - /// - `token_id`: The 32‑byte identifier for the token. - /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. - /// - `transaction`: The current GroveDB transaction. - /// - `platform_version`: The platform version to determine the method variant. 
- /// - /// # Returns - /// - /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. - pub(crate) fn prove_token_pre_programmed_distributions_operations( - &self, - token_id: [u8; 32], - start_at: Option, - limit: Option, - drive_operations: &mut Vec, - transaction: TransactionArg, - platform_version: &PlatformVersion, - ) -> Result, Error> { - match platform_version - .drive - .methods - .token - .prove - .pre_programmed_distributions - { - 0 => self.prove_token_pre_programmed_distributions_operations_v0( - token_id, - start_at, - limit, - drive_operations, - transaction, - platform_version, - ), - version => Err(Error::Drive(DriveError::UnknownVersionMismatch { - method: "prove_pre_programmed_distributions_operations".to_string(), - known_versions: vec![0], - received: version, - })), - } - } -} diff --git a/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/mod.rs index 546c04c7cf5..8a3ca68dc9a 100644 --- a/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/mod.rs +++ b/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/mod.rs @@ -1,50 +1,101 @@ -use crate::drive::tokens::distribution::queries::{ - pre_programmed_distributions_query, QueryPreProgrammedDistributionStartAt, -}; +mod v0; + +use crate::drive::tokens::distribution::queries::QueryPreProgrammedDistributionStartAt; use crate::drive::Drive; +use crate::error::drive::DriveError; use crate::error::Error; use crate::fees::op::LowLevelDriveOperation; +use dpp::version::PlatformVersion; use grovedb::TransactionArg; -use platform_version::version::PlatformVersion; impl Drive { - /// Fetches the pre‑programmed distributions for a token as a proof. + /// Proves the pre‑programmed distributions for a token, using the appropriate versioned method. 
+ /// + /// This method queries the pre‑programmed distributions tree at the path + /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: /// - /// This method queries the backing store for the pre‑programmed distributions tree at the path - /// defined by `token_pre_programmed_distributions_path_vec(token_id)`. It then extracts a nested - /// mapping where: + /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. + /// - **Inner keys:** Recipient identifiers (`Identifier`). + /// - **Values:** Token amounts (`TokenAmount`). /// - /// - **Outer keys:** Are timestamps (`TimestampMillis`) representing each distribution time, - /// extracted from the 5th path component (index 4). The time is expected to be stored as 4 bytes in big‑endian. - /// - **Inner keys:** Are recipient identifiers (`Identifier`) derived from the query key. - /// - **Values:** Are token amounts (`TokenAmount`), extracted from elements that are sum items. + /// The method dispatches to the correct versioned implementation based on the `platform_version`. /// /// # Parameters /// /// - `token_id`: The 32‑byte identifier for the token. - /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. /// - `transaction`: The current GroveDB transaction. - /// - `platform_version`: The platform version to use. + /// - `platform_version`: The platform version to determine the method variant. /// /// # Returns /// /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. 
- pub(super) fn prove_token_pre_programmed_distributions_operations_v0( + pub fn prove_token_pre_programmed_distributions( &self, token_id: [u8; 32], start_at: Option, limit: Option, - drive_operations: &mut Vec, transaction: TransactionArg, platform_version: &PlatformVersion, ) -> Result, Error> { - let path_query = pre_programmed_distributions_query(token_id, start_at, limit); - - self.grove_get_proved_path_query( - &path_query, + self.prove_token_pre_programmed_distributions_operations( + token_id, + start_at, + limit, + &mut vec![], transaction, - drive_operations, - &platform_version.drive, + platform_version, ) } + /// Proves the pre‑programmed distributions for a token, using the appropriate versioned method. + /// + /// This method queries the pre‑programmed distributions tree at the path + /// `token_pre_programmed_distributions_path_vec(token_id)`. It constructs a nested mapping where: + /// + /// - **Outer keys:** Timestamps (`TimestampMillis`) representing each distribution time. + /// - **Inner keys:** Recipient identifiers (`Identifier`). + /// - **Values:** Token amounts (`TokenAmount`). + /// + /// The method dispatches to the correct versioned implementation based on the `platform_version`. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. + /// + /// # Returns + /// + /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. 
+ pub(crate) fn prove_token_pre_programmed_distributions_operations( + &self, + token_id: [u8; 32], + start_at: Option, + limit: Option, + drive_operations: &mut Vec, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version + .drive + .methods + .token + .prove + .pre_programmed_distributions + { + 0 => self.prove_token_pre_programmed_distributions_operations_v0( + token_id, + start_at, + limit, + drive_operations, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "prove_token_pre_programmed_distributions_operations".to_string(), + known_versions: vec![0], + received: version, + })), + } + } } diff --git a/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/v0/mod.rs new file mode 100644 index 00000000000..546c04c7cf5 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/prove/pre_programmed_distributions/v0/mod.rs @@ -0,0 +1,50 @@ +use crate::drive::tokens::distribution::queries::{ + pre_programmed_distributions_query, QueryPreProgrammedDistributionStartAt, +}; +use crate::drive::Drive; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; + +impl Drive { + /// Fetches the pre‑programmed distributions for a token as a proof. + /// + /// This method queries the backing store for the pre‑programmed distributions tree at the path + /// defined by `token_pre_programmed_distributions_path_vec(token_id)`. It then extracts a nested + /// mapping where: + /// + /// - **Outer keys:** Are timestamps (`TimestampMillis`) representing each distribution time, + /// extracted from the 5th path component (index 4). The time is expected to be stored as 4 bytes in big‑endian. 
+ /// - **Inner keys:** Are recipient identifiers (`Identifier`) derived from the query key. + /// - **Values:** Are token amounts (`TokenAmount`), extracted from elements that are sum items. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to use. + /// + /// # Returns + /// + /// A `Result` containing a nested `BTreeMap` on success or an `Error` on failure. + pub(super) fn prove_token_pre_programmed_distributions_operations_v0( + &self, + token_id: [u8; 32], + start_at: Option, + limit: Option, + drive_operations: &mut Vec, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let path_query = pre_programmed_distributions_query(token_id, start_at, limit); + + self.grove_get_proved_path_query( + &path_query, + transaction, + drive_operations, + &platform_version.drive, + ) + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/set_perpetual_distribution_next_event_for_identity_id/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/set_perpetual_distribution_next_event_for_identity_id/mod.rs new file mode 100644 index 00000000000..6c91595ec48 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/set_perpetual_distribution_next_event_for_identity_id/mod.rs @@ -0,0 +1,66 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::block::block_info::BlockInfo; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use dpp::identifier::Identifier; +use dpp::version::PlatformVersion; + +impl Drive { + /// Sets the next scheduled event time for a perpetual distribution for a given identity, + /// using 
the appropriate versioned method. + /// + /// This method updates the perpetual distributions tree at the path + /// `token_perpetual_distributions_path_vec(token_id)`. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `identity_id`: The identifier of the identity whose next event timestamp is being set. + /// - `moment`: The `RewardDistributionMoment` indicating the moment the identity just made their claim. + /// - `block_info`: Block metadata used for setting storage flags. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. + /// + /// # Returns + /// + /// A `Result<(), Error>` indicating success or failure. + pub(crate) fn set_perpetual_distribution_claimed_for_identity_id_operations( + &self, + token_id: [u8; 32], + identity_id: Identifier, + moment: RewardDistributionMoment, + block_info: &BlockInfo, + known_to_be_replace: bool, + drive_operations: &mut Vec, + platform_version: &PlatformVersion, + ) -> Result<(), Error> { + match platform_version + .drive + .methods + .token + .update + .perpetual_distribution_next_event_for_identity_id + { + 0 => self.set_perpetual_distribution_claimed_for_identity_id_operations_v0( + token_id, + identity_id, + moment, + block_info, + known_to_be_replace, + drive_operations, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "set_perpetual_distribution_next_event_for_identity_id_operations" + .to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/distribution/set_perpetual_distribution_next_event_for_identity_id/v0/mod.rs b/packages/rs-drive/src/drive/tokens/distribution/set_perpetual_distribution_next_event_for_identity_id/v0/mod.rs new file mode 100644 index 00000000000..f5b99b6bc74 
--- /dev/null +++ b/packages/rs-drive/src/drive/tokens/distribution/set_perpetual_distribution_next_event_for_identity_id/v0/mod.rs @@ -0,0 +1,83 @@ +use crate::drive::tokens::paths::token_perpetual_distributions_identity_last_claimed_time_path_vec; +use crate::drive::Drive; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use crate::util::storage_flags::StorageFlags; +use dpp::block::block_info::BlockInfo; +use dpp::identifier::Identifier; +use dpp::version::PlatformVersion; +use grovedb::Element; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use crate::util::object_size_info::PathKeyElementInfo; + +impl Drive { + /// Sets the next scheduled event time for a perpetual distribution for a given identity. + /// + /// This method updates the tree at `token_perpetual_distributions_path_vec(token_id)` + /// by storing an 8-byte big-endian encoded timestamp representing the next scheduled distribution event. + /// + /// # Parameters + /// + /// - `token_id`: The 32‑byte identifier for the token. + /// - `identity_id`: The identifier of the identity whose next event timestamp is being set. + /// - `moment`: The `RewardDistributionMoment` indicating the moment the identity just made their claim. + /// - `block_info`: Block metadata for setting storage flags. + /// - `drive_operations`: A mutable vector to accumulate low-level drive operations. + /// - `transaction`: The current GroveDB transaction. + /// - `platform_version`: The platform version to determine the method variant. + /// + /// # Returns + /// + /// A `Result<(), Error>` indicating success or failure. 
+ pub(super) fn set_perpetual_distribution_claimed_for_identity_id_operations_v0( + &self, + token_id: [u8; 32], + identity_id: Identifier, + moment: RewardDistributionMoment, + block_info: &BlockInfo, + known_to_be_replace: bool, + drive_operations: &mut Vec, + platform_version: &PlatformVersion, + ) -> Result<(), Error> { + let perpetual_distributions_path = + token_perpetual_distributions_identity_last_claimed_time_path_vec(token_id); + + let moment_bytes = moment.to_be_bytes_vec(); + + // Generate storage flags for tracking historical cleanup + let storage_flags = + StorageFlags::new_single_epoch(block_info.epoch.index, Some(identity_id.to_buffer())); + + if known_to_be_replace { + // This is slightly more performant + self.batch_replace( + PathKeyElementInfo::<0>::PathKeyRefElement(( + perpetual_distributions_path, + identity_id.as_slice(), + Element::new_item_with_flags( + moment_bytes, + storage_flags.to_some_element_flags(), + ), + )), + drive_operations, + &platform_version.drive, + )?; + } else { + // Insert the timestamp into the tree + self.batch_insert( + PathKeyElementInfo::<0>::PathKeyRefElement(( + perpetual_distributions_path, + identity_id.as_slice(), + Element::new_item_with_flags( + moment_bytes, + storage_flags.to_some_element_flags(), + ), + )), + drive_operations, + &platform_version.drive, + )?; + } + + Ok(()) + } +} diff --git a/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/mod.rs b/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/mod.rs index 7ea7d03fd5a..9e3a49a5bea 100644 --- a/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/mod.rs +++ b/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/mod.rs @@ -15,7 +15,7 @@ impl Drive { drive_version: &DriveVersion, ) -> Result<(), Error> where - I: IntoIterator + ExactSizeIterator, + I: IntoIterator, { match drive_version .methods 
diff --git a/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/v0/mod.rs b/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/v0/mod.rs index 6efd7d1575d..ff12c2d0141 100644 --- a/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/estimated_costs/for_root_token_ms_interval_distribution/v0/mod.rs @@ -19,7 +19,7 @@ impl Drive { times: I, estimated_costs_only_with_layer_info: &mut HashMap, ) where - I: IntoIterator + ExactSizeIterator, + I: IntoIterator, { // 1. Insert estimation for the generic timed distributions tree. estimated_costs_only_with_layer_info.insert( diff --git a/packages/rs-drive/src/drive/tokens/estimated_costs/for_token_perpetual_distribution/mod.rs b/packages/rs-drive/src/drive/tokens/estimated_costs/for_token_perpetual_distribution/mod.rs new file mode 100644 index 00000000000..a789a4929e6 --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/estimated_costs/for_token_perpetual_distribution/mod.rs @@ -0,0 +1,67 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use dpp::prelude::TimestampMillis; +use dpp::version::drive_versions::DriveVersion; +use grovedb::batch::KeyInfoPath; +use grovedb::EstimatedLayerInformation; +use std::collections::HashMap; + +impl Drive { + /// Adds cost estimation entries for a token's pre-programmed distribution tree. + /// + /// This function updates the provided `estimated_costs_only_with_layer_info` hashmap with estimation entries + /// for each layer in the pre-programmed distribution tree associated with a specific token. The tree structure + /// includes: + /// + /// - The root level of the pre-programmed distributions. + /// - The token-specific subtree (keyed by `token_id`). + /// - One sum tree per distribution time (each timestamp in `times`). 
+ /// + /// The function selects the appropriate estimation logic based on the provided `drive_version`. + /// + /// # Parameters + /// + /// - `token_id`: The optional 32-byte identifier for the token whose perpetual distribution tree is being estimated. + /// When `None`, only the shared root distribution trees receive estimation entries. + /// - `estimated_costs_only_with_layer_info`: A mutable hashmap that maps `KeyInfoPath` to `EstimatedLayerInformation`. + /// This cache is used by Grovedb to track the estimated storage costs for each layer in the tree. + /// - `drive_version`: The drive version that determines which estimation logic to use. + /// + /// # Returns + /// + /// - `Ok(())` if the estimation entries were successfully added. + /// - `Err(DriveError::UnknownVersionMismatch)` if the provided `drive_version` does not match any supported version. + /// + /// # Errors + /// + /// Returns an error if the `drive_version` is not recognized, ensuring that only supported estimation + /// implementations are applied. 
+ pub(crate) fn add_estimation_costs_for_token_perpetual_distribution( + token_id: Option<[u8; 32]>, + estimated_costs_only_with_layer_info: &mut HashMap, + drive_version: &DriveVersion, + ) -> Result<(), Error> { + match drive_version + .methods + .identity + .cost_estimation + .for_token_perpetual_distribution + { + 0 => { + Self::add_estimation_costs_for_token_perpetual_distribution_v0( + token_id, + estimated_costs_only_with_layer_info, + ); + Ok(()) + } + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "add_estimation_costs_for_token_perpetual_distribution".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/estimated_costs/for_token_perpetual_distribution/v0/mod.rs b/packages/rs-drive/src/drive/tokens/estimated_costs/for_token_perpetual_distribution/v0/mod.rs new file mode 100644 index 00000000000..2b14ee3f14d --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/estimated_costs/for_token_perpetual_distribution/v0/mod.rs @@ -0,0 +1,76 @@ +use crate::drive::tokens::paths::{ + token_distributions_root_path_vec, + token_perpetual_distributions_identity_last_claimed_time_path_vec, + token_perpetual_distributions_path_vec, token_root_perpetual_distributions_path_vec, +}; +use crate::drive::Drive; +use crate::util::type_constants::{DEFAULT_HASH_SIZE_U8, U64_SIZE_U32, U8_SIZE_U8}; +use grovedb::batch::KeyInfoPath; +use grovedb::EstimatedLayerCount::{ApproximateElements, EstimatedLevel}; +use grovedb::EstimatedLayerSizes::{AllItems, AllSubtrees}; +use grovedb::EstimatedSumTrees::NoSumTrees; +use grovedb::{EstimatedLayerInformation, TreeType}; +use std::collections::HashMap; + +impl Drive { + /// Version 0 of the estimation function for perpetual distributions. + /// + /// This function adds estimation cost entries for: + /// 1. The root perpetual distributions tree. + /// 2. The token-specific subtree (using `token_id`). + /// 3. 
The subtree tracking identities' last claim time. + /// + /// # Parameters + /// - `token_id`: The identifier for the token. + /// - `estimated_costs_only_with_layer_info`: A mutable hashmap that holds estimated layer information. + pub(crate) fn add_estimation_costs_for_token_perpetual_distribution_v0( + token_id: Option<[u8; 32]>, + estimated_costs_only_with_layer_info: &mut HashMap, + ) { + // 1. Add estimation for the root distributions tree. + estimated_costs_only_with_layer_info.insert( + KeyInfoPath::from_known_owned_path(token_distributions_root_path_vec()), + EstimatedLayerInformation { + tree_type: TreeType::NormalTree, + estimated_layer_count: EstimatedLevel(1, false), + estimated_layer_sizes: AllSubtrees(U8_SIZE_U8, NoSumTrees, None), + }, + ); + + // 2. Add estimation for the root perpetual distributions tree. + estimated_costs_only_with_layer_info.insert( + KeyInfoPath::from_known_owned_path(token_root_perpetual_distributions_path_vec()), + EstimatedLayerInformation { + tree_type: TreeType::NormalTree, + estimated_layer_count: EstimatedLevel(10, false), // Estimated depth + estimated_layer_sizes: AllSubtrees(DEFAULT_HASH_SIZE_U8, NoSumTrees, None), + }, + ); + + if let Some(token_id) = token_id { + // 3. Add estimation for the token-specific perpetual distribution subtree. + estimated_costs_only_with_layer_info.insert( + KeyInfoPath::from_known_owned_path(token_perpetual_distributions_path_vec( + token_id, + )), + EstimatedLayerInformation { + tree_type: TreeType::NormalTree, + estimated_layer_count: EstimatedLevel(2, false), + estimated_layer_sizes: AllSubtrees(U8_SIZE_U8, NoSumTrees, None), + }, + ); + + // 4. 
Add estimation for identities' last claim subtree + estimated_costs_only_with_layer_info.insert( + KeyInfoPath::from_known_owned_path( + token_perpetual_distributions_identity_last_claimed_time_path_vec(token_id), + ), + EstimatedLayerInformation { + tree_type: TreeType::NormalTree, + estimated_layer_count: ApproximateElements(1000), // Example size, adjust as needed + estimated_layer_sizes: AllItems(DEFAULT_HASH_SIZE_U8, U64_SIZE_U32, None), + }, + ); + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/estimated_costs/mod.rs b/packages/rs-drive/src/drive/tokens/estimated_costs/mod.rs index b0ebc0bb9d1..9afdf1164de 100644 --- a/packages/rs-drive/src/drive/tokens/estimated_costs/mod.rs +++ b/packages/rs-drive/src/drive/tokens/estimated_costs/mod.rs @@ -15,3 +15,6 @@ pub mod for_token_pre_programmed_distribution; /// Module for handling operations related to token interval distribution. pub mod for_root_token_ms_interval_distribution; + +/// Module for handling operations related to token perpetual distribution +pub mod for_token_perpetual_distribution; diff --git a/packages/rs-drive/src/drive/tokens/info/prove_identities_token_infos/v0/mod.rs b/packages/rs-drive/src/drive/tokens/info/prove_identities_token_infos/v0/mod.rs index d74ae92a503..e8ea8a373f4 100644 --- a/packages/rs-drive/src/drive/tokens/info/prove_identities_token_infos/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/info/prove_identities_token_infos/v0/mod.rs @@ -75,7 +75,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -87,6 +86,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, @@ 
-170,7 +175,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -182,6 +186,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, @@ -254,7 +264,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -266,6 +275,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, diff --git a/packages/rs-drive/src/drive/tokens/info/prove_identity_token_infos/v0/mod.rs b/packages/rs-drive/src/drive/tokens/info/prove_identity_token_infos/v0/mod.rs index e6d7247d861..4ed3b4cbf53 100644 --- a/packages/rs-drive/src/drive/tokens/info/prove_identity_token_infos/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/info/prove_identity_token_infos/v0/mod.rs @@ -77,7 +77,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -89,6 +88,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([ ( @@ -225,7 
+230,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -237,6 +241,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, diff --git a/packages/rs-drive/src/drive/tokens/mint_many/mod.rs b/packages/rs-drive/src/drive/tokens/mint_many/mod.rs new file mode 100644 index 00000000000..ecbe0234ebf --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/mint_many/mod.rs @@ -0,0 +1,107 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::block::block_info::BlockInfo; +use dpp::fee::fee_result::FeeResult; +use dpp::prelude::Identifier; +use dpp::version::PlatformVersion; +use grovedb::{batch::KeyInfoPath, EstimatedLayerInformation, TransactionArg}; +use std::collections::HashMap; + +impl Drive { + /// Mints (issues) new tokens by increasing the total supply and adding them to an identity's balance. 
+ pub fn token_mint_many( + &self, + token_id: Identifier, + recipients: Vec<(Identifier, u64)>, + issuance_amount: u64, + allow_first_mint: bool, + block_info: &BlockInfo, + apply: bool, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version.drive.methods.token.update.mint_many { + 0 => self.token_mint_many_v0( + token_id, + recipients, + issuance_amount, + allow_first_mint, + block_info, + apply, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "token_mint_many".to_string(), + known_versions: vec![0], + received: version, + })), + } + } + + /// Adds the operations to mint_many tokens without calculating fees and optionally applying. + pub fn token_mint_many_add_to_operations( + &self, + token_id: Identifier, + recipients: Vec<(Identifier, u64)>, + issuance_amount: u64, + allow_first_mint: bool, + apply: bool, + transaction: TransactionArg, + drive_operations: &mut Vec, + platform_version: &PlatformVersion, + ) -> Result<(), Error> { + match platform_version.drive.methods.token.update.mint_many { + 0 => self.token_mint_many_add_to_operations_v0( + token_id, + recipients, + issuance_amount, + allow_first_mint, + apply, + transaction, + drive_operations, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "token_mint_many_add_to_operations".to_string(), + known_versions: vec![0], + received: version, + })), + } + } + + /// Gathers the operations needed to mint_many tokens. 
+ pub fn token_mint_many_operations( + &self, + token_id: Identifier, + recipients: Vec<(Identifier, u64)>, + issuance_amount: u64, + allow_first_mint: bool, + estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version.drive.methods.token.update.mint_many { + 0 => self.token_mint_many_operations_v0( + token_id, + recipients, + issuance_amount, + allow_first_mint, + estimated_costs_only_with_layer_info, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "token_mint_many_operations".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/mint_many/v0/mod.rs b/packages/rs-drive/src/drive/tokens/mint_many/v0/mod.rs new file mode 100644 index 00000000000..077980e444a --- /dev/null +++ b/packages/rs-drive/src/drive/tokens/mint_many/v0/mod.rs @@ -0,0 +1,143 @@ +use crate::drive::Drive; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::block::block_info::BlockInfo; +use dpp::fee::fee_result::FeeResult; +use dpp::identifier::Identifier; +use dpp::version::PlatformVersion; +use grovedb::{batch::KeyInfoPath, EstimatedLayerInformation, TransactionArg}; +use std::collections::HashMap; + +impl Drive { + pub(super) fn token_mint_many_v0( + &self, + token_id: Identifier, + recipients: Vec<(Identifier, u64)>, + issuance_amount: u64, + allow_first_mint: bool, + block_info: &BlockInfo, + apply: bool, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let mut drive_operations = vec![]; + + self.token_mint_many_add_to_operations_v0( + token_id, + recipients, + issuance_amount, + allow_first_mint, + apply, + transaction, + &mut drive_operations, + platform_version, + )?; + + let fees = Drive::calculate_fee( + None, + Some(drive_operations), + &block_info.epoch, 
+ self.config.epochs_per_era, + platform_version, + None, + )?; + + Ok(fees) + } + + pub(super) fn token_mint_many_add_to_operations_v0( + &self, + token_id: Identifier, + recipients: Vec<(Identifier, u64)>, + issuance_amount: u64, + allow_first_mint: bool, + apply: bool, + transaction: TransactionArg, + drive_operations: &mut Vec, + platform_version: &PlatformVersion, + ) -> Result<(), Error> { + let mut estimated_costs_only_with_layer_info = + if apply { None } else { Some(HashMap::new()) }; + + let batch_operations = self.token_mint_many_operations_v0( + token_id, + recipients, + issuance_amount, + allow_first_mint, + &mut estimated_costs_only_with_layer_info, + transaction, + platform_version, + )?; + + self.apply_batch_low_level_drive_operations( + estimated_costs_only_with_layer_info, + transaction, + batch_operations, + drive_operations, + &platform_version.drive, + ) + } + + pub(super) fn token_mint_many_operations_v0( + &self, + token_id: Identifier, + mut recipients: Vec<(Identifier, u64)>, + total_mint_amount: u64, + allow_first_mint: bool, + estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let mut drive_operations = vec![]; + + let weight_sum = recipients + .iter_mut() + .map(|(_, weight)| { + // We do this so we can't overflow + if *weight > u32::MAX as u64 { + *weight = u32::MAX as u64 + } + *weight + }) + .sum::(); + let total_mint_amount_u128 = total_mint_amount as u128; + + let mut balance_left = total_mint_amount; + + for (i, (identity_id, weight)) in recipients.iter().enumerate() { + let amount = if i == recipients.len() - 1 { + balance_left + } else { + let amount = total_mint_amount_u128 + .saturating_mul(*weight as u128) + .div_ceil(weight_sum as u128) as u64; + balance_left -= amount; + amount + }; + + drive_operations.extend(self.add_to_identity_token_balance_operations( + token_id.to_buffer(), + identity_id.to_buffer(), + amount, 
+ estimated_costs_only_with_layer_info, + transaction, + platform_version, + )?); + } + + // Update total supply + + drive_operations.extend(self.add_to_token_total_supply_operations( + token_id.to_buffer(), + total_mint_amount, + allow_first_mint, + estimated_costs_only_with_layer_info, + transaction, + platform_version, + )?); + + Ok(drive_operations) + } +} diff --git a/packages/rs-drive/src/drive/tokens/mod.rs b/packages/rs-drive/src/drive/tokens/mod.rs index c6ba59c841d..917425b34b8 100644 --- a/packages/rs-drive/src/drive/tokens/mod.rs +++ b/packages/rs-drive/src/drive/tokens/mod.rs @@ -28,6 +28,10 @@ pub mod info; #[cfg(feature = "server")] pub mod mint; +/// Implements minting operations for creating new tokens towards many recipients at the same time. +#[cfg(feature = "server")] +pub mod mint_many; + /// Manages system-level operations and utilities. #[cfg(feature = "server")] pub mod system; diff --git a/packages/rs-drive/src/drive/tokens/paths.rs b/packages/rs-drive/src/drive/tokens/paths.rs index 8e212c58c78..6c072c56225 100644 --- a/packages/rs-drive/src/drive/tokens/paths.rs +++ b/packages/rs-drive/src/drive/tokens/paths.rs @@ -2,6 +2,7 @@ use crate::drive::RootTree; use dpp::block::block_info::BlockInfo; use dpp::block::epoch::EpochIndex; use dpp::data_contract::associated_token::token_perpetual_distribution::methods::v0::TokenPerpetualDistributionV0Accessors; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_type::RewardDistributionType; use dpp::data_contract::associated_token::token_perpetual_distribution::TokenPerpetualDistribution; use dpp::prelude::{BlockHeight, TimestampMillis}; @@ -52,6 +53,15 @@ pub const TOKEN_BLOCK_TIMED_DISTRIBUTIONS_KEY: u8 = 64; /// Key for the epoch timed token distributions. 
pub const TOKEN_EPOCH_TIMED_DISTRIBUTIONS_KEY: u8 = 192; +/// Key for the perpetual distribution info. +pub const TOKEN_PERPETUAL_DISTRIBUTIONS_INFO_KEY: u8 = 128; + +/// Key for the perpetual distribution first event. +pub const TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY: u8 = 64; + +/// Key for the perpetual distribution last claim for identities key. +pub const TOKEN_PERPETUAL_DISTRIBUTIONS_FOR_IDENTITIES_LAST_CLAIM_KEY: u8 = 192; + /// The path for the balances tree pub fn tokens_root_path() -> [&'static [u8]; 1] { @@ -178,6 +188,32 @@ pub fn token_perpetual_distributions_path_vec(token_id: [u8; 32]) -> Vec ] } +/// The path for the token perpetual distributions tree for a token +pub fn token_perpetual_distributions_identity_last_claimed_time_path( + token_id: &[u8; 32], +) -> [&[u8]; 5] { + [ + Into::<&[u8; 1]>::into(RootTree::Tokens), + &[TOKEN_DISTRIBUTIONS_KEY], + &[TOKEN_PERPETUAL_DISTRIBUTIONS_KEY], + token_id, + &[TOKEN_PERPETUAL_DISTRIBUTIONS_FOR_IDENTITIES_LAST_CLAIM_KEY], + ] +} + +/// The path for the token perpetual distributions tree for a token as a vector +pub fn token_perpetual_distributions_identity_last_claimed_time_path_vec( + token_id: [u8; 32], +) -> Vec> { + vec![ + vec![RootTree::Tokens as u8], + vec![TOKEN_DISTRIBUTIONS_KEY], + vec![TOKEN_PERPETUAL_DISTRIBUTIONS_KEY], + token_id.to_vec(), + vec![TOKEN_PERPETUAL_DISTRIBUTIONS_FOR_IDENTITIES_LAST_CLAIM_KEY], + ] +} + /// The path for the token pre-programmed distributions tree pub fn token_root_pre_programmed_distributions_path() -> [&'static [u8]; 3] { [ @@ -422,19 +458,22 @@ pub trait TokenPerpetualDistributionPaths { /// Returns the path where the perpetual distribution should be stored. fn distribution_path(&self, unit: u64) -> Vec>; /// Returns the path where the perpetual distribution should be stored. 
- fn distribution_path_for_next_interval(&self, block_info: &BlockInfo) -> Vec>; + fn distribution_path_for_next_interval_from_block_info( + &self, + block_info: &BlockInfo, + ) -> Vec>; } impl TokenPerpetualDistributionPaths for TokenPerpetualDistribution { fn root_distribution_path(&self) -> Vec> { match self.distribution_type() { - RewardDistributionType::BlockBasedDistribution(_, _, _) => { + RewardDistributionType::BlockBasedDistribution { .. } => { token_block_timed_distributions_path_vec() } - RewardDistributionType::TimeBasedDistribution(_, _, _) => { + RewardDistributionType::TimeBasedDistribution { .. } => { token_ms_timed_distributions_path_vec() } - RewardDistributionType::EpochBasedDistribution(_, _, _) => { + RewardDistributionType::EpochBasedDistribution { .. } => { token_epoch_timed_distributions_path_vec() } } @@ -442,37 +481,62 @@ impl TokenPerpetualDistributionPaths for TokenPerpetualDistribution { fn distribution_path(&self, unit: u64) -> Vec> { match self.distribution_type() { - RewardDistributionType::BlockBasedDistribution(_, _, _) => { + RewardDistributionType::BlockBasedDistribution { .. } => { token_block_timed_at_block_distributions_path_vec(unit) } - RewardDistributionType::TimeBasedDistribution(_, _, _) => { + RewardDistributionType::TimeBasedDistribution { .. } => { token_ms_timed_at_time_distributions_path_vec(unit) } - RewardDistributionType::EpochBasedDistribution(_, _, _) => { + RewardDistributionType::EpochBasedDistribution { .. } => { token_epoch_timed_at_epoch_distributions_path_vec(unit as EpochIndex) } } } - fn distribution_path_for_next_interval(&self, block_info: &BlockInfo) -> Vec> { + fn distribution_path_for_next_interval_from_block_info( + &self, + block_info: &BlockInfo, + ) -> Vec> { match self.distribution_type() { // If the distribution is based on block height, return the next height where emissions occur. 
- RewardDistributionType::BlockBasedDistribution(interval, _, _) => { + RewardDistributionType::BlockBasedDistribution { interval, .. } => { let height = block_info.height - block_info.height % interval + interval; token_block_timed_at_block_distributions_path_vec(height) } // If the distribution is based on time, return the next timestamp in milliseconds. - RewardDistributionType::TimeBasedDistribution(interval, _, _) => { + RewardDistributionType::TimeBasedDistribution { interval, .. } => { let time = block_info.time_ms - block_info.time_ms % interval + interval; token_ms_timed_at_time_distributions_path_vec(time) } // If the distribution is based on epochs, return the next epoch index. - RewardDistributionType::EpochBasedDistribution(interval, _, _) => { + RewardDistributionType::EpochBasedDistribution { interval, .. } => { let index = block_info.epoch.index - block_info.epoch.index % interval + interval; token_epoch_timed_at_epoch_distributions_path_vec(index) } } } } + +/// Paths for the token perpetual distribution moment +pub trait TokenPerpetualDistributionMomentPaths { + /// The distribution path for a moment + fn distribution_path(&self) -> Vec>; +} + +impl TokenPerpetualDistributionMomentPaths for RewardDistributionMoment { + fn distribution_path(&self) -> Vec> { + match self { + RewardDistributionMoment::BlockBasedMoment(height) => { + token_block_timed_at_block_distributions_path_vec(*height) + } + RewardDistributionMoment::TimeBasedMoment(time_ms) => { + token_ms_timed_at_time_distributions_path_vec(*time_ms) + } + RewardDistributionMoment::EpochBasedMoment(epoch) => { + token_epoch_timed_at_epoch_distributions_path_vec(*epoch) + } + } + } +} diff --git a/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/v0/mod.rs b/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/v0/mod.rs index 07b6fef19c7..a66e2c2d15c 100644 --- a/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/v0/mod.rs +++ 
b/packages/rs-drive/src/drive/tokens/status/prove_token_statuses/v0/mod.rs @@ -66,7 +66,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -78,6 +77,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([ ( diff --git a/packages/rs-drive/src/drive/tokens/system/prove_token_total_supply_and_aggregated_identity_balances/v0/mod.rs b/packages/rs-drive/src/drive/tokens/system/prove_token_total_supply_and_aggregated_identity_balances/v0/mod.rs index 64ee4a9dd36..c20e544fa00 100644 --- a/packages/rs-drive/src/drive/tokens/system/prove_token_total_supply_and_aggregated_identity_balances/v0/mod.rs +++ b/packages/rs-drive/src/drive/tokens/system/prove_token_total_supply_and_aggregated_identity_balances/v0/mod.rs @@ -66,7 +66,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -78,6 +77,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, @@ -182,7 +187,6 @@ mod tests { version: 0, owner_id: Default::default(), document_types: Default::default(), - metadata: None, config: DataContractConfig::V0(DataContractConfigV0 { can_be_deleted: false, readonly: false, @@ -194,6 +198,12 @@ mod tests { requires_identity_decryption_bounded_key: None, }), schema_defs: None, + created_at: None, + 
updated_at: None, + created_at_block_height: None, + updated_at_block_height: None, + created_at_epoch: None, + updated_at_epoch: None, groups: Default::default(), tokens: BTreeMap::from([( 0, diff --git a/packages/rs-drive/src/error/drive.rs b/packages/rs-drive/src/error/drive.rs index 922cc5b8e4b..2f318704837 100644 --- a/packages/rs-drive/src/error/drive.rs +++ b/packages/rs-drive/src/error/drive.rs @@ -1,5 +1,6 @@ use crate::drive::contract::MAX_CONTRACT_HISTORY_FETCH_LIMIT; use dpp::fee::Credits; +use dpp::identifier::Identifier; use dpp::version::FeatureVersion; /// Drive errors @@ -188,4 +189,8 @@ pub enum DriveError { /// Data Contract not found #[error("data contract not found: {0}")] DataContractNotFound(String), + + /// Data Contract not found + #[error("data contract does not have a start moment: {0}")] + ContractDoesNotHaveAStartMoment(Identifier), } diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/mod.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/mod.rs index ac5990efb1b..d805cdc4b87 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/mod.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/mod.rs @@ -1,4 +1,5 @@ mod token_burn_transition; +mod token_claim_transition; mod token_config_update_transition; mod token_destroy_frozen_funds_transition; mod token_emergency_action_transition; diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_burn_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_burn_transition.rs index b4d62088fc7..b500818b219 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_burn_transition.rs +++ 
b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_burn_transition.rs @@ -1,5 +1,6 @@ use dpp::block::epoch::Epoch; use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::group::action_event::GroupActionEvent; use dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -79,7 +80,7 @@ impl DriveHighLevelBatchOperationConverter for TokenBurnTransitionAction { })); let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { + if token_configuration.keeps_history().keeps_burning_history() { ops.push(TokenOperation(TokenOperationType::TokenHistory { token_id: self.token_id(), owner_id, diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_claim_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_claim_transition.rs new file mode 100644 index 00000000000..628c27ec748 --- /dev/null +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_claim_transition.rs @@ -0,0 +1,117 @@ +use dpp::block::epoch::Epoch; +use dpp::data_contract::associated_token::token_distribution_key::TokenDistributionInfo; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionResolvedRecipient; +use dpp::identifier::Identifier; +use dpp::tokens::token_event::TokenEvent; +use platform_version::version::PlatformVersion; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::state_transition_action::action_convert_to_operations::batch::DriveHighLevelBatchOperationConverter; +use 
crate::state_transition_action::batch::batched_transition::token_transition::token_base_transition_action::TokenBaseTransitionActionAccessorsV0; +use crate::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::{TokenClaimTransitionAction, TokenClaimTransitionActionAccessorsV0}; +use crate::util::batch::{DriveOperation, IdentityOperationType}; +use crate::util::batch::drive_op_batch::TokenOperationType; +use crate::util::batch::DriveOperation::{IdentityOperation, TokenOperation}; + +impl DriveHighLevelBatchOperationConverter for TokenClaimTransitionAction { + fn into_high_level_batch_drive_operations<'b>( + self, + _epoch: &Epoch, + owner_id: Identifier, + platform_version: &PlatformVersion, + ) -> Result>, Error> { + match platform_version + .drive + .methods + .state_transitions + .convert_to_high_level_operations + .token_claim_transition + { + 0 => { + let data_contract_id = self.base().data_contract_id(); + + let identity_contract_nonce = self.base().identity_contract_nonce(); + + let mut ops = vec![IdentityOperation( + IdentityOperationType::UpdateIdentityContractNonce { + identity_id: owner_id.into_buffer(), + contract_id: data_contract_id.into_buffer(), + nonce: identity_contract_nonce, + }, + )]; + + match self.distribution_info() { + TokenDistributionInfo::Perpetual( + _, + _, + TokenDistributionResolvedRecipient::ContractOwnerIdentity(identity), + ) + | TokenDistributionInfo::PreProgrammed(_, identity) + | TokenDistributionInfo::Perpetual( + _, + _, + TokenDistributionResolvedRecipient::Identity(identity), + ) + | TokenDistributionInfo::Perpetual( + _, + _, + TokenDistributionResolvedRecipient::Evonode(identity), + ) => { + ops.push(TokenOperation(TokenOperationType::TokenMint { + token_id: self.token_id(), + identity_balance_holder_id: *identity, + mint_amount: self.amount(), + allow_first_mint: false, + })); + } + } + + match self.distribution_info() { + TokenDistributionInfo::PreProgrammed(release_time, 
recipient) => { + ops.push(TokenOperation( + TokenOperationType::TokenMarkPreProgrammedReleaseAsDistributed { + token_id: self.token_id(), + owner_id, + identity_id: *recipient, + release_time: *release_time, + }, + )); + } + TokenDistributionInfo::Perpetual( + last_release_moment, + next_release_moment, + _, + ) => { + ops.push(TokenOperation( + TokenOperationType::TokenMarkPerpetualReleaseAsDistributed { + token_id: self.token_id(), + owner_id, + last_release_moment: *last_release_moment, + next_release_moment: *next_release_moment, + recipient: self.recipient(), + }, + )); + } + } + ops.push(TokenOperation(TokenOperationType::TokenHistory { + token_id: self.token_id(), + owner_id, + nonce: identity_contract_nonce, + event: TokenEvent::Claim( + self.distribution_info().into(), + self.amount(), + self.public_note_owned(), + ), + })); + + Ok(ops) + } + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "TokenClaimTransitionAction::into_high_level_document_drive_operations" + .to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_config_update_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_config_update_transition.rs index ff0060b0f1f..90eca44d98f 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_config_update_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_config_update_transition.rs @@ -2,7 +2,6 @@ use std::borrow::Cow; use dpp::block::epoch::Epoch; use dpp::data_contract::accessors::v0::DataContractV0Setters; use dpp::data_contract::accessors::v1::DataContractV1Setters; -use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; use dpp::group::action_event::GroupActionEvent; use 
dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -92,18 +91,15 @@ impl DriveHighLevelBatchOperationConverter for TokenConfigUpdateTransitionAction }, )); - let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { - ops.push(TokenOperation(TokenOperationType::TokenHistory { - token_id: self.token_id(), - owner_id, - nonce: identity_contract_nonce, - event: TokenEvent::ConfigUpdate( - self.update_token_configuration_item().clone(), - self.public_note_owned(), - ), - })); - } + ops.push(TokenOperation(TokenOperationType::TokenHistory { + token_id: self.token_id(), + owner_id, + nonce: identity_contract_nonce, + event: TokenEvent::ConfigUpdate( + self.update_token_configuration_item().clone(), + self.public_note_owned(), + ), + })); } Ok(ops) diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_destroy_frozen_funds_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_destroy_frozen_funds_transition.rs index 52b46800f9c..7f40fdd6960 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_destroy_frozen_funds_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_destroy_frozen_funds_transition.rs @@ -1,5 +1,4 @@ use dpp::block::epoch::Epoch; -use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; use dpp::group::action_event::GroupActionEvent; use dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -79,19 +78,16 @@ impl DriveHighLevelBatchOperationConverter for TokenDestroyFrozenFundsTransition burn_amount: self.amount(), })); - let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { - 
ops.push(TokenOperation(TokenOperationType::TokenHistory { - token_id: self.token_id(), - owner_id, - nonce: identity_contract_nonce, - event: TokenEvent::DestroyFrozenFunds( - self.frozen_identity_id(), - self.amount(), - self.public_note_owned(), - ), - })); - } + ops.push(TokenOperation(TokenOperationType::TokenHistory { + token_id: self.token_id(), + owner_id, + nonce: identity_contract_nonce, + event: TokenEvent::DestroyFrozenFunds( + self.frozen_identity_id(), + self.amount(), + self.public_note_owned(), + ), + })); } Ok(ops) diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_emergency_action_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_emergency_action_transition.rs index 93a0899818e..7e9470b9972 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_emergency_action_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_emergency_action_transition.rs @@ -1,5 +1,4 @@ use dpp::block::epoch::Epoch; -use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; use dpp::group::action_event::GroupActionEvent; use dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -78,18 +77,15 @@ impl DriveHighLevelBatchOperationConverter for TokenEmergencyActionTransitionAct status: self.emergency_action().resulting_status(platform_version)?, })); - let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { - ops.push(TokenOperation(TokenOperationType::TokenHistory { - token_id: self.token_id(), - owner_id, - nonce: identity_contract_nonce, - event: TokenEvent::EmergencyAction( - self.emergency_action(), - self.public_note_owned(), - ), - })); - } + ops.push(TokenOperation(TokenOperationType::TokenHistory { + token_id: 
self.token_id(), + owner_id, + nonce: identity_contract_nonce, + event: TokenEvent::EmergencyAction( + self.emergency_action(), + self.public_note_owned(), + ), + })); } Ok(ops) diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_freeze_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_freeze_transition.rs index 507db5e3dd1..0d453ed4a20 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_freeze_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_freeze_transition.rs @@ -1,5 +1,6 @@ use dpp::block::epoch::Epoch; use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::group::action_event::GroupActionEvent; use dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -81,7 +82,7 @@ impl DriveHighLevelBatchOperationConverter for TokenFreezeTransitionAction { })); let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { + if token_configuration.keeps_history().keeps_freezing_history() { ops.push(TokenOperation(TokenOperationType::TokenHistory { token_id: self.token_id(), owner_id, diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_mint_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_mint_transition.rs index 81cb0b2e1c0..1306088c33b 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_mint_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_mint_transition.rs @@ -1,5 
+1,6 @@ use dpp::block::epoch::Epoch; use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::group::action_event::GroupActionEvent; use dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -84,7 +85,7 @@ impl DriveHighLevelBatchOperationConverter for TokenMintTransitionAction { })); let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { + if token_configuration.keeps_history().keeps_minting_history() { ops.push(TokenOperation(TokenOperationType::TokenHistory { token_id: self.token_id(), owner_id, diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transfer_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transfer_transition.rs index cb9db8c9b8d..e84ed462f54 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transfer_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transfer_transition.rs @@ -1,5 +1,6 @@ use dpp::block::epoch::Epoch; use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::identifier::Identifier; use dpp::tokens::token_event::TokenEvent; use platform_version::version::PlatformVersion; @@ -54,7 +55,7 @@ impl DriveHighLevelBatchOperationConverter for TokenTransferTransitionAction { })); let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { + if token_configuration.keeps_history().keeps_transfer_history() { let (public_note, 
shared_encrypted_note, private_encrypted_note) = self.notes_owned(); ops.push(TokenOperation(TokenOperationType::TokenHistory { diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transition.rs index 91237b7f8b7..691674d9f34 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_transition.rs @@ -12,6 +12,7 @@ use crate::state_transition_action::batch::batched_transition::token_transition: use crate::state_transition_action::batch::batched_transition::token_transition::token_emergency_action_transition_action::TokenEmergencyActionTransitionActionAccessorsV0; use crate::state_transition_action::batch::batched_transition::token_transition::token_freeze_transition_action::TokenFreezeTransitionActionAccessorsV0; use crate::state_transition_action::batch::batched_transition::token_transition::token_mint_transition_action::TokenMintTransitionActionAccessorsV0; +use crate::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::TokenClaimTransitionActionAccessorsV0; use crate::state_transition_action::batch::batched_transition::token_transition::token_transfer_transition_action::TokenTransferTransitionActionAccessorsV0; use crate::state_transition_action::batch::batched_transition::token_transition::token_unfreeze_transition_action::TokenUnfreezeTransitionActionAccessorsV0; @@ -38,6 +39,8 @@ impl DriveHighLevelBatchOperationConverter for TokenTransitionAction { .into_high_level_batch_drive_operations(epoch, owner_id, platform_version), TokenTransitionAction::UnfreezeAction(token_unfreeze_action) => token_unfreeze_action .into_high_level_batch_drive_operations(epoch, owner_id, platform_version), + 
TokenTransitionAction::ClaimAction(token_claim) => token_claim + .into_high_level_batch_drive_operations(epoch, owner_id, platform_version), TokenTransitionAction::EmergencyActionAction(token_emergency_action) => { token_emergency_action.into_high_level_batch_drive_operations( epoch, @@ -90,6 +93,11 @@ impl TokenTransitionAction { unfreeze_action.frozen_identity_id(), unfreeze_action.public_note().cloned(), ), + TokenTransitionAction::ClaimAction(release_action) => TokenEvent::Claim( + release_action.distribution_info().into(), + release_action.amount(), + release_action.public_note().cloned(), + ), TokenTransitionAction::EmergencyActionAction(emergency_action) => { TokenEvent::EmergencyAction( emergency_action.emergency_action(), diff --git a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_unfreeze_transition.rs b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_unfreeze_transition.rs index d297c2cae19..0758e6955d4 100644 --- a/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_unfreeze_transition.rs +++ b/packages/rs-drive/src/state_transition_action/action_convert_to_operations/batch/token/token_unfreeze_transition.rs @@ -1,5 +1,6 @@ use dpp::block::epoch::Epoch; use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::group::action_event::GroupActionEvent; use dpp::group::group_action::GroupAction; use dpp::group::group_action::v0::GroupActionV0; @@ -81,7 +82,7 @@ impl DriveHighLevelBatchOperationConverter for TokenUnfreezeTransitionAction { })); let token_configuration = self.base().token_configuration()?; - if token_configuration.keeps_history() { + if token_configuration.keeps_history().keeps_freezing_history() { 
ops.push(TokenOperation(TokenOperationType::TokenHistory { token_id: self.token_id(), owner_id, diff --git a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/mod.rs b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/mod.rs index df9f43a1991..37a25ec17ff 100644 --- a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/mod.rs +++ b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/mod.rs @@ -20,9 +20,14 @@ pub mod token_destroy_frozen_funds_transition_action; /// token_emergency_action_transition_action pub mod token_emergency_action_transition_action; +/// token_claim_transition_action +pub mod token_claim_transition_action; + use derive_more::From; use dpp::block::block_info::BlockInfo; use dpp::data_contract::accessors::v0::DataContractV0Getters; +use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::data_contract::document_type::DocumentTypeRef; use dpp::data_contracts::SystemDataContract; use dpp::document::Document; @@ -31,7 +36,7 @@ use dpp::prelude::{DataContract, IdentityNonce}; use dpp::ProtocolError; use platform_version::version::PlatformVersion; use crate::error::Error; -use crate::state_transition_action::batch::batched_transition::token_transition::token_base_transition_action::TokenBaseTransitionAction; +use crate::state_transition_action::batch::batched_transition::token_transition::token_base_transition_action::{TokenBaseTransitionAction, TokenBaseTransitionActionAccessorsV0}; use crate::state_transition_action::batch::batched_transition::token_transition::token_burn_transition_action::{TokenBurnTransitionAction, TokenBurnTransitionActionAccessorsV0}; use 
crate::state_transition_action::batch::batched_transition::token_transition::token_config_update_transition_action::{TokenConfigUpdateTransitionAction, TokenConfigUpdateTransitionActionAccessorsV0}; use crate::state_transition_action::batch::batched_transition::token_transition::token_freeze_transition_action::{TokenFreezeTransitionAction, TokenFreezeTransitionActionAccessorsV0}; @@ -43,6 +48,7 @@ use crate::state_transition_action::batch::batched_transition::token_transition: use crate::state_transition_action::batch::batched_transition::token_transition::token_emergency_action_transition_action::TokenEmergencyActionTransitionActionAccessorsV0; use crate::state_transition_action::batch::batched_transition::token_transition::token_destroy_frozen_funds_transition_action::TokenDestroyFrozenFundsTransitionAction; use crate::state_transition_action::batch::batched_transition::token_transition::token_destroy_frozen_funds_transition_action::TokenDestroyFrozenFundsTransitionActionAccessorsV0; +use crate::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::{TokenClaimTransitionAction, TokenClaimTransitionActionAccessorsV0}; /// token action #[derive(Debug, Clone, From)] @@ -57,6 +63,8 @@ pub enum TokenTransitionAction { FreezeAction(TokenFreezeTransitionAction), /// unfreeze UnfreezeAction(TokenUnfreezeTransitionAction), + /// release + ClaimAction(TokenClaimTransitionAction), /// emergency action EmergencyActionAction(TokenEmergencyActionTransitionAction), /// destroy frozen funds action @@ -74,6 +82,7 @@ impl TokenTransitionAction { TokenTransitionAction::TransferAction(action) => action.base(), TokenTransitionAction::FreezeAction(action) => action.base(), TokenTransitionAction::UnfreezeAction(action) => action.base(), + TokenTransitionAction::ClaimAction(action) => action.base(), TokenTransitionAction::EmergencyActionAction(action) => action.base(), TokenTransitionAction::DestroyFrozenFundsAction(action) => action.base(), 
TokenTransitionAction::ConfigUpdateAction(action) => action.base(), @@ -88,6 +97,7 @@ impl TokenTransitionAction { TokenTransitionAction::TransferAction(action) => action.base_owned(), TokenTransitionAction::FreezeAction(action) => action.base_owned(), TokenTransitionAction::UnfreezeAction(action) => action.base_owned(), + TokenTransitionAction::ClaimAction(action) => action.base_owned(), TokenTransitionAction::EmergencyActionAction(action) => action.base_owned(), TokenTransitionAction::DestroyFrozenFundsAction(action) => action.base_owned(), TokenTransitionAction::ConfigUpdateAction(action) => action.base_owned(), @@ -102,6 +112,7 @@ impl TokenTransitionAction { TokenTransitionAction::TransferAction(_) => "transfer", TokenTransitionAction::FreezeAction(_) => "freeze", TokenTransitionAction::UnfreezeAction(_) => "unfreeze", + TokenTransitionAction::ClaimAction(_) => "claim", TokenTransitionAction::EmergencyActionAction(_) => "emergencyAction", TokenTransitionAction::DestroyFrozenFundsAction(_) => "destroyFrozenFunds", TokenTransitionAction::ConfigUpdateAction(_) => "configUpdate", @@ -152,4 +163,20 @@ impl TokenTransitionAction { ) .map_err(Error::Protocol) } + + /// Do we keep history for this action + pub fn keeps_history(&self) -> Result { + let keeps_history = self.base().token_configuration()?.keeps_history(); + match self { + TokenTransitionAction::BurnAction(_) => Ok(keeps_history.keeps_burning_history()), + TokenTransitionAction::MintAction(_) => Ok(keeps_history.keeps_minting_history()), + TokenTransitionAction::TransferAction(_) => Ok(keeps_history.keeps_transfer_history()), + TokenTransitionAction::FreezeAction(_) => Ok(keeps_history.keeps_freezing_history()), + TokenTransitionAction::UnfreezeAction(_) => Ok(keeps_history.keeps_freezing_history()), + TokenTransitionAction::ClaimAction(_) => Ok(true), + TokenTransitionAction::EmergencyActionAction(_) => Ok(true), + TokenTransitionAction::DestroyFrozenFundsAction(_) => Ok(true), + 
TokenTransitionAction::ConfigUpdateAction(_) => Ok(true), + } + } } diff --git a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/mod.rs b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/mod.rs new file mode 100644 index 00000000000..00cae9ac3f4 --- /dev/null +++ b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/mod.rs @@ -0,0 +1,81 @@ +use derive_more::From; +use dpp::balances::credits::TokenAmount; +use dpp::data_contract::associated_token::token_distribution_key::TokenDistributionInfo; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; + +/// transformer module for token release transition action +pub mod transformer; +mod v0; + +pub use v0::*; // re-export the v0 module items (including TokenIssuanceTransitionActionV0) + +use crate::state_transition_action::batch::batched_transition::token_transition::token_base_transition_action::TokenBaseTransitionAction; + +/// Token release transition action +#[derive(Debug, Clone, From)] +pub enum TokenClaimTransitionAction { + /// v0 + V0(TokenClaimTransitionActionV0), +} + +impl TokenClaimTransitionActionAccessorsV0 for TokenClaimTransitionAction { + fn base(&self) -> &TokenBaseTransitionAction { + match self { + TokenClaimTransitionAction::V0(v0) => &v0.base, + } + } + + fn base_owned(self) -> TokenBaseTransitionAction { + match self { + TokenClaimTransitionAction::V0(v0) => v0.base, + } + } + + fn amount(&self) -> TokenAmount { + match self { + TokenClaimTransitionAction::V0(v0) => v0.amount, + } + } + + fn set_amount(&mut self, amount: TokenAmount) { + match self { + TokenClaimTransitionAction::V0(v0) => v0.amount = amount, + } + } + + fn recipient(&self) -> TokenDistributionRecipient { + match self { + TokenClaimTransitionAction::V0(v0) => 
v0.recipient(), + } + } + + fn distribution_info(&self) -> &TokenDistributionInfo { + match self { + TokenClaimTransitionAction::V0(v0) => &v0.distribution_info, + } + } + + fn set_distribution_info(&mut self, distribution_info: TokenDistributionInfo) { + match self { + TokenClaimTransitionAction::V0(v0) => v0.distribution_info = distribution_info, + } + } + + fn public_note(&self) -> Option<&String> { + match self { + TokenClaimTransitionAction::V0(v0) => v0.public_note.as_ref(), + } + } + + fn public_note_owned(self) -> Option { + match self { + TokenClaimTransitionAction::V0(v0) => v0.public_note, + } + } + + fn set_public_note(&mut self, public_note: Option) { + match self { + TokenClaimTransitionAction::V0(v0) => v0.public_note = public_note, + } + } +} diff --git a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/transformer.rs b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/transformer.rs new file mode 100644 index 00000000000..db78a0e7b6e --- /dev/null +++ b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/transformer.rs @@ -0,0 +1,117 @@ +use dpp::platform_value::Identifier; +use dpp::ProtocolError; +use grovedb::TransactionArg; +use std::sync::Arc; +use dpp::block::block_info::BlockInfo; +use dpp::fee::fee_result::FeeResult; +use dpp::prelude::{ConsensusValidationResult, UserFeeIncrease}; +use crate::drive::contract::DataContractFetchInfo; +use crate::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::{TokenClaimTransitionActionV0, TokenClaimTransitionAction}; +use dpp::state_transition::batch_transition::token_claim_transition::TokenClaimTransition; +use platform_version::version::PlatformVersion; +use crate::drive::Drive; +use crate::error::Error; +use 
crate::state_transition_action::batch::BatchedTransitionAction; + +/// Implement methods to transform a `TokenClaimTransition` into a `TokenClaimTransitionAction`. +impl TokenClaimTransitionAction { + /// Transform a `TokenClaimTransition` into a `TokenClaimTransitionAction` using the provided data contract lookup. + /// + /// # Arguments + /// + /// * `drive` - A reference to the `Drive` instance used for accessing the system. + /// * `owner_id` - The identifier of the owner initiating the release transition. + /// * `transaction` - The transaction argument used for state changes. + /// * `value` - A `TokenClaimTransition` instance. + /// * `approximate_without_state_for_costs` - A flag indicating whether to approximate state costs without full state. + /// * `drive_operations` - A mutable reference to the vector of low-level operations that need to be performed. + /// * `get_data_contract` - A closure that fetches the `DataContractFetchInfo` given a contract ID. + /// * `platform_version` - The platform version for the context in which the transition is being executed. + /// + /// # Returns + /// + /// * `Result<(ConsensusValidationResult, FeeResult), Error>` - A `TokenClaimTransitionAction` if successful, otherwise `ProtocolError`. 
+ pub fn try_from_token_claim_transition_with_contract_lookup( + drive: &Drive, + owner_id: Identifier, + value: TokenClaimTransition, + approximate_without_state_for_costs: bool, + transaction: TransactionArg, + block_info: &BlockInfo, + user_fee_increase: UserFeeIncrease, + get_data_contract: impl Fn(Identifier) -> Result, ProtocolError>, + platform_version: &PlatformVersion, + ) -> Result< + ( + ConsensusValidationResult, + FeeResult, + ), + Error, + > { + match value { + TokenClaimTransition::V0(v0) => { + TokenClaimTransitionActionV0::try_from_token_claim_transition_with_contract_lookup( + drive, + owner_id, + v0, + approximate_without_state_for_costs, + transaction, + block_info, + user_fee_increase, + get_data_contract, + platform_version, + ) + } + } + } + + /// Transform a borrowed `TokenClaimTransition` into a `TokenClaimTransitionAction` using the provided data contract lookup. + /// + /// # Arguments + /// + /// * `drive` - A reference to the `Drive` instance used for accessing the system. + /// * `owner_id` - The identifier of the owner initiating the release transition. + /// * `transaction` - The transaction argument used for state changes. + /// * `value` - A reference to a `TokenClaimTransition`. + /// * `approximate_without_state_for_costs` - A flag indicating whether to approximate state costs without full state. + /// * `drive_operations` - A mutable reference to the vector of low-level operations that need to be performed. + /// * `get_data_contract` - A closure that fetches the `DataContractFetchInfo` given a contract ID. + /// * `platform_version` - The platform version for the context in which the transition is being executed. + /// + /// # Returns + /// + /// * `Result<(ConsensusValidationResult, FeeResult), Error>` - A `TokenClaimTransitionAction` if successful, otherwise `ProtocolError`. 
+ pub fn try_from_borrowed_token_claim_transition_with_contract_lookup( + drive: &Drive, + owner_id: Identifier, + value: &TokenClaimTransition, + approximate_without_state_for_costs: bool, + transaction: TransactionArg, + block_info: &BlockInfo, + user_fee_increase: UserFeeIncrease, + get_data_contract: impl Fn(Identifier) -> Result, ProtocolError>, + platform_version: &PlatformVersion, + ) -> Result< + ( + ConsensusValidationResult, + FeeResult, + ), + Error, + > { + match value { + TokenClaimTransition::V0(v0) => { + TokenClaimTransitionActionV0::try_from_borrowed_token_claim_transition_with_contract_lookup( + drive, + owner_id, + v0, + approximate_without_state_for_costs, + transaction, + block_info, + user_fee_increase, + get_data_contract, + platform_version, + ) + } + } + } +} diff --git a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/v0/mod.rs b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/v0/mod.rs new file mode 100644 index 00000000000..e18b7882496 --- /dev/null +++ b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/v0/mod.rs @@ -0,0 +1,129 @@ +mod transformer; + +use std::sync::Arc; +use dpp::balances::credits::TokenAmount; +use dpp::data_contract::associated_token::token_distribution_key::TokenDistributionInfo; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; +use dpp::identifier::Identifier; +use crate::drive::contract::DataContractFetchInfo; +use crate::state_transition_action::batch::batched_transition::token_transition::token_base_transition_action::{TokenBaseTransitionAction, TokenBaseTransitionActionAccessorsV0}; + +/// Token release transition action v0 +#[derive(Debug, Clone)] +pub struct TokenClaimTransitionActionV0 { + /// Base token transition action + pub 
base: TokenBaseTransitionAction, + /// Amount to be released, + /// if this is a release to Evonodes or a group, this is the total amount that later needs + /// to be split up + pub amount: TokenAmount, + /// The type of distribution we are targeting + pub distribution_info: TokenDistributionInfo, + /// A public note + pub public_note: Option, +} + +/// Accessors for `TokenClaimTransitionActionV0` +pub trait TokenClaimTransitionActionAccessorsV0 { + /// Returns a reference to the base token transition action + fn base(&self) -> &TokenBaseTransitionAction; + + /// Consumes self and returns the base token transition action + fn base_owned(self) -> TokenBaseTransitionAction; + + /// Returns the token position in the contract + fn token_position(&self) -> u16 { + self.base().token_position() + } + + /// Returns the token ID + fn token_id(&self) -> Identifier { + self.base().token_id() + } + + /// Returns the data contract ID + fn data_contract_id(&self) -> Identifier { + self.base().data_contract_id() + } + + /// Returns a reference to the data contract fetch info + fn data_contract_fetch_info_ref(&self) -> &Arc { + self.base().data_contract_fetch_info_ref() + } + + /// Returns the data contract fetch info + fn data_contract_fetch_info(&self) -> Arc { + self.base().data_contract_fetch_info() + } + + /// Returns the amount to be released + fn amount(&self) -> TokenAmount; + + /// Sets the amount to be released + fn set_amount(&mut self, amount: TokenAmount); + + /// Returns the recipient of the distribution + fn recipient(&self) -> TokenDistributionRecipient; + + /// Returns the type of distribution with its recipient + fn distribution_info(&self) -> &TokenDistributionInfo; + + /// Sets the type of distribution with its recipient + fn set_distribution_info(&mut self, distribution_type: TokenDistributionInfo); + + /// Returns the public note (optional) + fn public_note(&self) -> Option<&String>; + + /// Returns the public note (owned) + fn public_note_owned(self) -> 
Option; + + /// Sets the public note + fn set_public_note(&mut self, public_note: Option); +} + +impl TokenClaimTransitionActionAccessorsV0 for TokenClaimTransitionActionV0 { + fn base(&self) -> &TokenBaseTransitionAction { + &self.base + } + + fn base_owned(self) -> TokenBaseTransitionAction { + self.base + } + + fn amount(&self) -> TokenAmount { + self.amount + } + + fn set_amount(&mut self, amount: TokenAmount) { + self.amount = amount; + } + + fn recipient(&self) -> TokenDistributionRecipient { + match &self.distribution_info { + TokenDistributionInfo::PreProgrammed(_, identifier) => { + TokenDistributionRecipient::Identity(*identifier) + } + TokenDistributionInfo::Perpetual(_, _, resolved_recipient) => resolved_recipient.into(), + } + } + + fn distribution_info(&self) -> &TokenDistributionInfo { + &self.distribution_info + } + + fn set_distribution_info(&mut self, distribution_info: TokenDistributionInfo) { + self.distribution_info = distribution_info; + } + + fn public_note(&self) -> Option<&String> { + self.public_note.as_ref() + } + + fn public_note_owned(self) -> Option { + self.public_note + } + + fn set_public_note(&mut self, public_note: Option) { + self.public_note = public_note; + } +} diff --git a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/v0/transformer.rs b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/v0/transformer.rs new file mode 100644 index 00000000000..b01a110e727 --- /dev/null +++ b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_claim_transition_action/v0/transformer.rs @@ -0,0 +1,399 @@ +use std::collections::BTreeMap; +use std::sync::Arc; +use grovedb::TransactionArg; +use dpp::block::block_info::BlockInfo; +use dpp::block::epoch::EpochIndex; +use dpp::block::finalized_epoch_info::FinalizedEpochInfo; +use dpp::consensus::ConsensusError; +use 
dpp::consensus::state::state_error::StateError; +use dpp::consensus::state::token::InvalidTokenClaimPropertyMismatch; +use dpp::data_contract::accessors::v0::DataContractV0Getters; +use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_distribution_key::{TokenDistributionInfo, TokenDistributionType}; +use dpp::data_contract::associated_token::token_distribution_rules::accessors::v0::TokenDistributionRulesV0Getters; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::{TokenDistributionRecipient, TokenDistributionResolvedRecipient}; +use dpp::data_contract::associated_token::token_perpetual_distribution::methods::v0::TokenPerpetualDistributionV0Accessors; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; +use dpp::identifier::Identifier; +use dpp::state_transition::batch_transition::token_claim_transition::v0::TokenClaimTransitionV0; +use dpp::ProtocolError; +use crate::drive::contract::DataContractFetchInfo; +use crate::state_transition_action::batch::batched_transition::token_transition::token_base_transition_action::{TokenBaseTransitionAction, TokenBaseTransitionActionAccessorsV0}; +use crate::state_transition_action::batch::batched_transition::token_transition::token_claim_transition_action::v0::TokenClaimTransitionActionV0; +use dpp::fee::fee_result::FeeResult; +use dpp::prelude::{ConsensusValidationResult, UserFeeIncrease}; +use dpp::state_transition::batch_transition::token_base_transition::token_base_transition_accessors::TokenBaseTransitionAccessors; +use dpp::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; +use platform_version::version::PlatformVersion; +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use 
crate::state_transition_action::batch::batched_transition::BatchedTransitionAction; +use crate::state_transition_action::batch::batched_transition::token_transition::TokenTransitionAction; +use crate::state_transition_action::system::bump_identity_data_contract_nonce_action::BumpIdentityDataContractNonceAction; + +impl TokenClaimTransitionActionV0 { + /// Converts a `TokenClaimTransitionV0` into a `TokenClaimTransitionActionV0` using the provided contract lookup. + /// + /// This method processes the token releasing transition and returns the corresponding transition action + /// while looking up necessary data contracts and applying the relevant logic for releasing. + /// + /// # Arguments + /// + /// * `drive` - A reference to the `Drive` instance which handles data storage and retrieval. + /// * `owner_id` - The identifier of the owner initiating the releasing transition. This is typically the identity + /// performing the transaction, such as the user's ID. + /// * `transaction` - A transaction context that includes the necessary state and other details for the transition. + /// * `value` - The `TokenClaimTransitionV0` struct containing the transition data, including token amount and recipient. + /// * `approximate_without_state_for_costs` - A flag to determine if costs should be approximated without considering + /// the full state for the operation. Useful for optimizing the transaction cost calculations. + /// * `block_info` - Information about the current block to calculate fees. + /// * `get_data_contract` - A closure function that takes a contract identifier and returns a `DataContractFetchInfo` + /// containing the data contract details, including token configurations. + /// * `platform_version` - A reference to the platform version, ensuring the transition respects version-specific logic. 
+ /// + /// # Returns + /// + /// * `Result, Error>` - Returns the constructed `TokenClaimTransitionActionV0` if successful, + /// or an error if any issue arises, such as missing data or an invalid state transition. + pub fn try_from_token_claim_transition_with_contract_lookup( + drive: &Drive, + owner_id: Identifier, + value: TokenClaimTransitionV0, + approximate_without_state_for_costs: bool, + transaction: TransactionArg, + block_info: &BlockInfo, + user_fee_increase: UserFeeIncrease, + get_data_contract: impl Fn(Identifier) -> Result, ProtocolError>, + platform_version: &PlatformVersion, + ) -> Result< + ( + ConsensusValidationResult, + FeeResult, + ), + Error, + > { + let TokenClaimTransitionV0 { + base, + distribution_type, + public_note, + } = value; + + let mut drive_operations = vec![]; + + let base_action_validation_result = + TokenBaseTransitionAction::try_from_borrowed_base_transition_with_contract_lookup( + drive, + owner_id, + &base, + approximate_without_state_for_costs, + transaction, + &mut drive_operations, + get_data_contract, + platform_version, + )?; + + let fee_result = Drive::calculate_fee( + None, + Some(drive_operations), + &block_info.epoch, + drive.config.epochs_per_era, + platform_version, + None, + )?; + + let base_action = match base_action_validation_result.is_valid() { + true => base_action_validation_result.into_data()?, + false => { + let bump_action = BumpIdentityDataContractNonceAction::from_token_base_transition( + base, + owner_id, + user_fee_increase, + ); + let batched_action = + BatchedTransitionAction::BumpIdentityDataContractNonce(bump_action); + + return Ok(( + ConsensusValidationResult::new_with_data_and_errors( + batched_action.into(), + base_action_validation_result.errors, + ), + fee_result, + )); + } + }; + + Ok(( + BatchedTransitionAction::TokenAction(TokenTransitionAction::ClaimAction( + TokenClaimTransitionActionV0 { + base: base_action, + amount: 0, //todo + distribution_info: todo!(), + public_note, + } + 
.into(), + )) + .into(), + fee_result, + )) + } + + /// Converts a borrowed `TokenClaimTransitionV0` into a `TokenClaimTransitionActionV0` using the provided contract lookup. + /// + /// This method processes the token releasing transition and constructs the corresponding transition action while + /// looking up necessary data contracts and applying the relevant releasing logic. It does not require `drive_operations` + /// to be passed as a parameter, but it manages them internally. + /// + /// # Arguments + /// + /// * `drive` - A reference to the `Drive` instance that handles data storage and retrieval. + /// * `owner_id` - The identifier of the owner initiating the releasing transition. This is typically the identity + /// performing the transaction, such as the user's ID. + /// * `value` - A reference to the `TokenClaimTransitionV0` struct containing the transition data, including token + /// amount and recipient. + /// * `approximate_without_state_for_costs` - A flag to indicate whether costs should be approximated without full + /// state consideration. Useful for optimizing transaction cost calculations in scenarios where full state is not needed. + /// * `transaction` - The transaction context, which includes the necessary state and other details for the transition. + /// * `block_info` - Information about the current block (e.g., epoch) to help calculate transaction fees. + /// * `get_data_contract` - A closure function that takes a contract identifier and returns a `DataContractFetchInfo` + /// containing the data contract details, including token configurations. + /// * `platform_version` - A reference to the platform version to ensure the transition respects version-specific logic. + /// + //// # Returns + /// + /// * `Result<(ConsensusValidationResult, FeeResult), Error>` - Returns a tuple containing the constructed + /// `TokenClaimTransitionActionV0` and a `FeeResult` if successful. 
If an error occurs (e.g., missing data or + /// invalid state transition), it returns an `Error`. + /// + pub fn try_from_borrowed_token_claim_transition_with_contract_lookup( + drive: &Drive, + owner_id: Identifier, + value: &TokenClaimTransitionV0, + approximate_without_state_for_costs: bool, + transaction: TransactionArg, + block_info: &BlockInfo, + user_fee_increase: UserFeeIncrease, + get_data_contract: impl Fn(Identifier) -> Result, ProtocolError>, + platform_version: &PlatformVersion, + ) -> Result< + ( + ConsensusValidationResult, + FeeResult, + ), + Error, + > { + let TokenClaimTransitionV0 { + base, + distribution_type, + public_note, + } = value; + + let mut drive_operations = vec![]; + + let base_action_validation_result = + TokenBaseTransitionAction::try_from_borrowed_base_transition_with_contract_lookup( + drive, + owner_id, + base, + approximate_without_state_for_costs, + transaction, + &mut drive_operations, + get_data_contract, + platform_version, + )?; + + let mut fee_result = Drive::calculate_fee( + None, + Some(drive_operations), + &block_info.epoch, + drive.config.epochs_per_era, + platform_version, + None, + )?; + + let base_action = match base_action_validation_result.is_valid() { + true => base_action_validation_result.into_data()?, + false => { + let bump_action = + BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition( + base, + owner_id, + user_fee_increase, + ); + let batched_action = + BatchedTransitionAction::BumpIdentityDataContractNonce(bump_action); + + return Ok(( + ConsensusValidationResult::new_with_data_and_errors( + batched_action.into(), + base_action_validation_result.errors, + ), + fee_result, + )); + } + }; + + let token_config = base_action.token_configuration()?; + + let (amount, distribution_info) = match distribution_type { + TokenDistributionType::PreProgrammed => { + let Some(pre_programmed_distribution) = token_config + .distribution_rules() + .pre_programmed_distribution() + else { + let 
bump_action = + BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition( + base, + owner_id, + user_fee_increase, + ); + let batched_action = + BatchedTransitionAction::BumpIdentityDataContractNonce(bump_action); + + return Ok(( + ConsensusValidationResult::new_with_data_and_errors( + batched_action.into(), + vec![ConsensusError::StateError( + StateError::InvalidTokenClaimPropertyMismatch( + InvalidTokenClaimPropertyMismatch::new( + "pre programmed distribution", + base.token_id(), + ), + ), + )], + ), + fee_result, + )); + }; + + // We need to find the oldest pre-programmed distribution that wasn't yet claimed + // for this identity + let oldest_time = 0; + + let amount = 0; + + (amount, TokenDistributionInfo::PreProgrammed(0, owner_id)) + } + TokenDistributionType::Perpetual => { + // we need to validate that we have a perpetual distribution + let Some(perpetual_distribution) = + token_config.distribution_rules().perpetual_distribution() + else { + let bump_action = + BumpIdentityDataContractNonceAction::from_borrowed_token_base_transition( + base, + owner_id, + user_fee_increase, + ); + let batched_action = + BatchedTransitionAction::BumpIdentityDataContractNonce(bump_action); + + return Ok(( + ConsensusValidationResult::new_with_data_and_errors( + batched_action.into(), + vec![ConsensusError::StateError( + StateError::InvalidTokenClaimPropertyMismatch( + InvalidTokenClaimPropertyMismatch::new( + "perpetual distribution", + value.base().token_id(), + ), + ), + )], + ), + fee_result, + )); + }; + + let mut last_paid_time_operations = vec![]; + + let last_paid_moment = drive + .fetch_perpetual_distribution_last_paid_moment_operations( + base.token_id().to_buffer(), + owner_id, + perpetual_distribution.distribution_type(), + &mut last_paid_time_operations, + transaction, + platform_version, + )?; + + // if the token has never been paid then we use the token creation + + let start_from_moment_for_distribution = last_paid_moment + 
.or(perpetual_distribution + .distribution_type() + .contract_creation_moment(&base_action.data_contract_fetch_info().contract)) + .ok_or(Error::Drive(DriveError::ContractDoesNotHaveAStartMoment( + base_action.data_contract_fetch_info().contract.id(), + )))?; + + let last_paid_time_fee_result = Drive::calculate_fee( + None, + Some(last_paid_time_operations), + &block_info.epoch, + drive.config.epochs_per_era, + platform_version, + None, + )?; + + fee_result.checked_add_assign(last_paid_time_fee_result)?; + + let recipient = match perpetual_distribution.distribution_recipient() { + TokenDistributionRecipient::ContractOwner => { + TokenDistributionResolvedRecipient::ContractOwnerIdentity( + base_action.data_contract_fetch_info().contract.owner_id(), + ) + } + TokenDistributionRecipient::Identity(identifier) => { + TokenDistributionResolvedRecipient::Identity(identifier) + } + TokenDistributionRecipient::EvonodesByParticipation => { + let RewardDistributionMoment::EpochBasedMoment(epoch_index) = + start_from_moment_for_distribution + else { + return Err(Error::Drive(DriveError::NotSupported( + "evonodes by participation can only use epoch based distribution", + ))); + }; + let epochs: BTreeMap = drive + .get_finalized_epoch_infos( + epoch_index, + true, + block_info.epoch.index, + false, + transaction, + platform_version, + )?; + TokenDistributionResolvedRecipient::Evonode(owner_id) + } + }; + + let amount = perpetual_distribution + .distribution_type() + .rewards_in_interval(start_from_moment_for_distribution, block_info)?; + + ( + amount, + TokenDistributionInfo::Perpetual( + RewardDistributionMoment::TimeBasedMoment(0), + RewardDistributionMoment::TimeBasedMoment(0), + recipient, + ), + ) + } + }; + + Ok(( + BatchedTransitionAction::TokenAction(TokenTransitionAction::ClaimAction( + TokenClaimTransitionActionV0 { + base: base_action, + amount, + distribution_info, + public_note: public_note.clone(), + } + .into(), + )) + .into(), + fee_result, + )) + } +} diff --git 
a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_transition_action_type.rs b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_transition_action_type.rs index 1d4e07a908e..75f617670a4 100644 --- a/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_transition_action_type.rs +++ b/packages/rs-drive/src/state_transition_action/batch/batched_transition/token_transition/token_transition_action_type.rs @@ -11,6 +11,7 @@ impl TokenTransitionActionTypeGetter for TokenTransitionAction { TokenTransitionAction::TransferAction(_) => TokenTransitionActionType::Transfer, TokenTransitionAction::FreezeAction(_) => TokenTransitionActionType::Freeze, TokenTransitionAction::UnfreezeAction(_) => TokenTransitionActionType::Unfreeze, + TokenTransitionAction::ClaimAction(_) => TokenTransitionActionType::Claim, TokenTransitionAction::EmergencyActionAction(_) => { TokenTransitionActionType::EmergencyAction } diff --git a/packages/rs-drive/src/state_transition_action/contract/data_contract_create/transformer.rs b/packages/rs-drive/src/state_transition_action/contract/data_contract_create/transformer.rs index c195fc68d9c..a768c02e370 100644 --- a/packages/rs-drive/src/state_transition_action/contract/data_contract_create/transformer.rs +++ b/packages/rs-drive/src/state_transition_action/contract/data_contract_create/transformer.rs @@ -1,5 +1,6 @@ use crate::state_transition_action::contract::data_contract_create::v0::DataContractCreateTransitionActionV0; use crate::state_transition_action::contract::data_contract_create::DataContractCreateTransitionAction; +use dpp::block::block_info::BlockInfo; use dpp::state_transition::data_contract_create_transition::DataContractCreateTransition; use dpp::validation::operations::ProtocolValidationOperation; use dpp::ProtocolError; @@ -11,6 +12,7 @@ impl DataContractCreateTransitionAction { /// if validation is false, the 
data contract base structure is created regardless of if it is valid pub fn try_from_transition( value: DataContractCreateTransition, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, @@ -19,6 +21,7 @@ impl DataContractCreateTransitionAction { DataContractCreateTransition::V0(v0) => { Ok(DataContractCreateTransitionActionV0::try_from_transition( v0, + block_info, full_validation, validation_operations, platform_version, @@ -34,6 +37,7 @@ impl DataContractCreateTransitionAction { pub fn try_from_borrowed_transition( value: &DataContractCreateTransition, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, @@ -42,6 +46,7 @@ impl DataContractCreateTransitionAction { DataContractCreateTransition::V0(v0) => Ok( DataContractCreateTransitionActionV0::try_from_borrowed_transition( v0, + block_info, full_validation, validation_operations, platform_version, diff --git a/packages/rs-drive/src/state_transition_action/contract/data_contract_create/v0/transformer.rs b/packages/rs-drive/src/state_transition_action/contract/data_contract_create/v0/transformer.rs index 64a1497b2dd..8b829dfeccb 100644 --- a/packages/rs-drive/src/state_transition_action/contract/data_contract_create/v0/transformer.rs +++ b/packages/rs-drive/src/state_transition_action/contract/data_contract_create/v0/transformer.rs @@ -1,4 +1,6 @@ use crate::state_transition_action::contract::data_contract_create::v0::DataContractCreateTransitionActionV0; +use dpp::block::block_info::BlockInfo; +use dpp::data_contract::accessors::v1::DataContractV1Setters; use dpp::prelude::DataContract; use dpp::state_transition::data_contract_create_transition::DataContractCreateTransitionV0; use dpp::validation::operations::ProtocolValidationOperation; @@ -8,17 +10,22 @@ use platform_version::version::PlatformVersion; impl DataContractCreateTransitionActionV0 { pub(in 
crate::state_transition_action::contract::data_contract_create) fn try_from_transition( value: DataContractCreateTransitionV0, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, ) -> Result { + let mut data_contract = DataContract::try_from_platform_versioned( + value.data_contract, + full_validation, + validation_operations, + platform_version, + )?; + data_contract.set_created_at(Some(block_info.time_ms)); + data_contract.set_created_at_epoch(Some(block_info.epoch.index)); + data_contract.set_created_at_block_height(Some(block_info.height)); Ok(DataContractCreateTransitionActionV0 { - data_contract: DataContract::try_from_platform_versioned( - value.data_contract, - full_validation, - validation_operations, - platform_version, - )?, + data_contract, identity_nonce: value.identity_nonce, user_fee_increase: value.user_fee_increase, }) @@ -26,17 +33,22 @@ impl DataContractCreateTransitionActionV0 { pub(in crate::state_transition_action::contract::data_contract_create) fn try_from_borrowed_transition( value: &DataContractCreateTransitionV0, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, ) -> Result { + let mut data_contract = DataContract::try_from_platform_versioned( + value.data_contract.clone(), + full_validation, + validation_operations, + platform_version, + )?; + data_contract.set_created_at(Some(block_info.time_ms)); + data_contract.set_created_at_epoch(Some(block_info.epoch.index)); + data_contract.set_created_at_block_height(Some(block_info.height)); Ok(DataContractCreateTransitionActionV0 { - data_contract: DataContract::try_from_platform_versioned( - value.data_contract.clone(), - full_validation, - validation_operations, - platform_version, - )?, + data_contract, identity_nonce: value.identity_nonce, user_fee_increase: value.user_fee_increase, }) diff --git 
a/packages/rs-drive/src/state_transition_action/contract/data_contract_update/mod.rs b/packages/rs-drive/src/state_transition_action/contract/data_contract_update/mod.rs index 3ea424e0e62..cf1811e0da8 100644 --- a/packages/rs-drive/src/state_transition_action/contract/data_contract_update/mod.rs +++ b/packages/rs-drive/src/state_transition_action/contract/data_contract_update/mod.rs @@ -29,6 +29,13 @@ impl DataContractUpdateTransitionAction { } } + /// data contract mut + pub fn data_contract_mut(&mut self) -> &mut DataContract { + match self { + DataContractUpdateTransitionAction::V0(transition) => &mut transition.data_contract, + } + } + /// identity contract nonce pub fn identity_contract_nonce(&self) -> IdentityNonce { match self { diff --git a/packages/rs-drive/src/state_transition_action/contract/data_contract_update/transformer.rs b/packages/rs-drive/src/state_transition_action/contract/data_contract_update/transformer.rs index d8ae02dbf59..b3026a36dd8 100644 --- a/packages/rs-drive/src/state_transition_action/contract/data_contract_update/transformer.rs +++ b/packages/rs-drive/src/state_transition_action/contract/data_contract_update/transformer.rs @@ -1,5 +1,6 @@ use crate::state_transition_action::contract::data_contract_update::v0::DataContractUpdateTransitionActionV0; use crate::state_transition_action::contract::data_contract_update::DataContractUpdateTransitionAction; +use dpp::block::block_info::BlockInfo; use dpp::state_transition::data_contract_update_transition::DataContractUpdateTransition; use dpp::validation::operations::ProtocolValidationOperation; use dpp::ProtocolError; @@ -11,6 +12,7 @@ impl DataContractUpdateTransitionAction { /// if validation is false, the data contract base structure is created regardless of if it is valid pub fn try_from_transition( value: DataContractUpdateTransition, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, @@ -19,6 +21,7 @@ impl 
DataContractUpdateTransitionAction { DataContractUpdateTransition::V0(v0) => { Ok(DataContractUpdateTransitionActionV0::try_from_transition( v0, + block_info, full_validation, validation_operations, platform_version, @@ -34,6 +37,7 @@ impl DataContractUpdateTransitionAction { pub fn try_from_borrowed_transition( value: &DataContractUpdateTransition, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, @@ -42,6 +46,7 @@ impl DataContractUpdateTransitionAction { DataContractUpdateTransition::V0(v0) => Ok( DataContractUpdateTransitionActionV0::try_from_borrowed_transition( v0, + block_info, full_validation, validation_operations, platform_version, diff --git a/packages/rs-drive/src/state_transition_action/contract/data_contract_update/v0/transformer.rs b/packages/rs-drive/src/state_transition_action/contract/data_contract_update/v0/transformer.rs index ced97b3c7ae..413320d903f 100644 --- a/packages/rs-drive/src/state_transition_action/contract/data_contract_update/v0/transformer.rs +++ b/packages/rs-drive/src/state_transition_action/contract/data_contract_update/v0/transformer.rs @@ -1,4 +1,6 @@ use crate::state_transition_action::contract::data_contract_update::v0::DataContractUpdateTransitionActionV0; +use dpp::block::block_info::BlockInfo; +use dpp::data_contract::accessors::v1::DataContractV1Setters; use dpp::data_contract::DataContract; use dpp::state_transition::data_contract_update_transition::DataContractUpdateTransitionV0; use dpp::validation::operations::ProtocolValidationOperation; @@ -8,17 +10,22 @@ use platform_version::version::PlatformVersion; impl DataContractUpdateTransitionActionV0 { pub(in crate::state_transition_action::contract::data_contract_update) fn try_from_transition( value: DataContractUpdateTransitionV0, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, ) -> Result { + let mut data_contract = 
DataContract::try_from_platform_versioned( + value.data_contract, + full_validation, + validation_operations, + platform_version, + )?; + data_contract.set_updated_at(Some(block_info.time_ms)); + data_contract.set_updated_at_epoch(Some(block_info.epoch.index)); + data_contract.set_updated_at_block_height(Some(block_info.height)); Ok(DataContractUpdateTransitionActionV0 { - data_contract: DataContract::try_from_platform_versioned( - value.data_contract, - full_validation, - validation_operations, - platform_version, - )?, + data_contract, identity_contract_nonce: value.identity_contract_nonce, user_fee_increase: value.user_fee_increase, }) @@ -26,17 +33,22 @@ impl DataContractUpdateTransitionActionV0 { pub(in crate::state_transition_action::contract::data_contract_update) fn try_from_borrowed_transition( value: &DataContractUpdateTransitionV0, + block_info: &BlockInfo, full_validation: bool, validation_operations: &mut Vec, platform_version: &PlatformVersion, ) -> Result { + let mut data_contract = DataContract::try_from_platform_versioned( + value.data_contract.clone(), + full_validation, + validation_operations, + platform_version, + )?; + data_contract.set_updated_at(Some(block_info.time_ms)); + data_contract.set_updated_at_epoch(Some(block_info.epoch.index)); + data_contract.set_updated_at_block_height(Some(block_info.height)); Ok(DataContractUpdateTransitionActionV0 { - data_contract: DataContract::try_from_platform_versioned( - value.data_contract.clone(), - full_validation, - validation_operations, - platform_version, - )?, + data_contract, identity_contract_nonce: value.identity_contract_nonce, user_fee_increase: value.user_fee_increase, }) diff --git a/packages/rs-drive/src/util/batch/drive_op_batch/token.rs b/packages/rs-drive/src/util/batch/drive_op_batch/token.rs index ad08114540e..ce5aac2c7eb 100644 --- a/packages/rs-drive/src/util/batch/drive_op_batch/token.rs +++ b/packages/rs-drive/src/util/batch/drive_op_batch/token.rs @@ -5,13 +5,15 @@ use 
crate::util::batch::drive_op_batch::DriveLowLevelOperationConverter; use dpp::balances::credits::TokenAmount; use dpp::block::block_info::BlockInfo; use dpp::identifier::Identifier; -use dpp::prelude::IdentityNonce; +use dpp::prelude::{IdentityNonce, TimestampMillis}; use dpp::tokens::status::TokenStatus; use dpp::tokens::token_event::TokenEvent; use grovedb::batch::KeyInfoPath; use grovedb::{EstimatedLayerInformation, TransactionArg}; use platform_version::version::PlatformVersion; use std::collections::HashMap; +use dpp::data_contract::associated_token::token_perpetual_distribution::distribution_recipient::TokenDistributionRecipient; +use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; /// Operations on Tokens #[derive(Clone, Debug)] @@ -29,13 +31,50 @@ pub enum TokenOperationType { TokenMint { /// The token id token_id: Identifier, - /// The identity to burn from + /// The identity to mint to identity_balance_holder_id: Identifier, /// The amount to issue mint_amount: TokenAmount, /// Should we allow this to be the first ever mint allow_first_mint: bool, }, + /// Mints tokens to many recipients + TokenMintMany { + /// The token id + token_id: Identifier, + /// The identities that will receive this amount along with their weight + recipients: Vec<(Identifier, u64)>, + /// The amount to issue + mint_amount: TokenAmount, + /// Should we allow this to be the first ever mint + allow_first_mint: bool, + }, + /// Marks the perpetual release as distributed + /// This removes the references in the queue + TokenMarkPerpetualReleaseAsDistributed { + /// The token id + token_id: Identifier, + /// The owner of this operation, generally the person making the state transition + owner_id: Identifier, + /// The last release time, block or epoch + last_release_moment: RewardDistributionMoment, + /// The next known release time, block or epoch + next_release_moment: RewardDistributionMoment, + /// The 
recipient + recipient: TokenDistributionRecipient, + }, + /// Marks the pre-programmed release as distributed + /// This removes the references in the queue + TokenMarkPreProgrammedReleaseAsDistributed { + /// The token id + token_id: Identifier, + /// The owner of this operation, generally the person making the state transition + owner_id: Identifier, + /// The identity that had their pre-programmed release set + identity_id: Identifier, + /// The last release time, block or epoch + release_time: TimestampMillis, + }, /// Performs a token transfer TokenTransfer { /// The token id @@ -129,6 +168,23 @@ impl DriveLowLevelOperationConverter for TokenOperationType { )?; Ok(batch_operations) } + TokenOperationType::TokenMintMany { + token_id, + recipients, + mint_amount, + allow_first_mint, + } => { + let batch_operations = drive.token_mint_many_operations( + token_id, + recipients, + mint_amount, + allow_first_mint, + estimated_costs_only_with_layer_info, + transaction, + platform_version, + )?; + Ok(batch_operations) + } TokenOperationType::TokenTransfer { token_id, sender_id, @@ -203,6 +259,45 @@ impl DriveLowLevelOperationConverter for TokenOperationType { )?; Ok(batch_operations) } + TokenOperationType::TokenMarkPerpetualReleaseAsDistributed { + token_id, + owner_id, + last_release_moment, + next_release_moment, + recipient, + } => { + let batch_operations = drive.mark_perpetual_release_as_distributed_operations( + token_id.to_buffer(), + owner_id.to_buffer(), + last_release_moment, + next_release_moment, + recipient, + block_info, + estimated_costs_only_with_layer_info, + transaction, + platform_version, + )?; + Ok(batch_operations) + } + TokenOperationType::TokenMarkPreProgrammedReleaseAsDistributed { + token_id, + owner_id, + identity_id, + release_time, + } => { + let batch_operations = drive + .mark_pre_programmed_release_as_distributed_operations( + token_id.to_buffer(), + owner_id.to_buffer(), + identity_id.to_buffer(), + release_time, + block_info, + 
estimated_costs_only_with_layer_info, + transaction, + platform_version, + )?; + Ok(batch_operations) + } } } } diff --git a/packages/rs-drive/src/util/batch/grovedb_op_batch/mod.rs b/packages/rs-drive/src/util/batch/grovedb_op_batch/mod.rs index ecf25027e2f..4939acfd9a7 100644 --- a/packages/rs-drive/src/util/batch/grovedb_op_batch/mod.rs +++ b/packages/rs-drive/src/util/batch/grovedb_op_batch/mod.rs @@ -5,7 +5,12 @@ use crate::drive::credit_pools::epochs; use crate::drive::identity::IdentityRootStructure; -use crate::drive::{credit_pools, RootTree}; +use crate::drive::{credit_pools, tokens, RootTree}; +use crate::util::batch::grovedb_op_batch::KnownPath::{ + TokenBalancesRoot, TokenDistributionRoot, TokenIdentityInfoRoot, + TokenPerpetualDistributionRoot, TokenPreProgrammedDistributionRoot, TokenStatusRoot, + TokenTimedDistributionRoot, +}; use crate::util::storage_flags::StorageFlags; use dpp::block::epoch::Epoch; use dpp::identity::{Purpose, SecurityLevel}; @@ -49,7 +54,14 @@ enum KnownPath { MiscRoot, //Level 1 WithdrawalTransactionsRoot, //Level 1 BalancesRoot, //Level 1 - TokenBalancesRoot, //Level 1 + TokenRoot, //Level 1 + TokenBalancesRoot, //Level 2 + TokenDistributionRoot, //Level 2 + TokenTimedDistributionRoot, //Level 3 + TokenPreProgrammedDistributionRoot, //Level 3 + TokenPerpetualDistributionRoot, //Level 3 + TokenIdentityInfoRoot, //Level 2 + TokenStatusRoot, //Level 2 VersionsRoot, //Level 1 VotesRoot, //Level 1 GroupActionsRoot, //Level 1 @@ -72,7 +84,7 @@ impl From for KnownPath { RootTree::Misc => KnownPath::MiscRoot, RootTree::WithdrawalTransactions => KnownPath::WithdrawalTransactionsRoot, RootTree::Balances => KnownPath::BalancesRoot, - RootTree::Tokens => KnownPath::TokenBalancesRoot, + RootTree::Tokens => KnownPath::TokenRoot, RootTree::Versions => KnownPath::VersionsRoot, RootTree::Votes => KnownPath::VotesRoot, RootTree::GroupActions => KnownPath::GroupActionsRoot, @@ -223,6 +235,57 @@ fn readable_key_info(known_path: KnownPath, 
key_info: &KeyInfo) -> (String, Opti _ => (hex_to_ascii(key), None), } } + KnownPath::TokenRoot if key.len() == 1 => match key[0] { + tokens::paths::TOKEN_DISTRIBUTIONS_KEY => { + (format!("Distribution({})", tokens::paths::TOKEN_DISTRIBUTIONS_KEY), Some(TokenDistributionRoot)) + } + tokens::paths::TOKEN_BALANCES_KEY => { + (format!("Balances({})", tokens::paths::TOKEN_BALANCES_KEY), Some(TokenBalancesRoot)) + } + tokens::paths::TOKEN_IDENTITY_INFO_KEY => { + (format!("IdentityInfo({})", tokens::paths::TOKEN_IDENTITY_INFO_KEY), Some(TokenIdentityInfoRoot)) + } + tokens::paths::TOKEN_STATUS_INFO_KEY => { + (format!("Status({})", tokens::paths::TOKEN_STATUS_INFO_KEY), Some(TokenStatusRoot)) + } + _ => (hex_to_ascii(key), None), + }, + KnownPath::TokenDistributionRoot if key.len() == 1 => match key[0] { + tokens::paths::TOKEN_TIMED_DISTRIBUTIONS_KEY => { + (format!("TimedDistribution({})", tokens::paths::TOKEN_TIMED_DISTRIBUTIONS_KEY), Some(TokenTimedDistributionRoot)) + } + tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_KEY => { + (format!("PerpetualDistribution({})", tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_KEY), Some(TokenPerpetualDistributionRoot)) + } + tokens::paths::TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY => { + (format!("PreProgrammedDistribution({})", tokens::paths::TOKEN_PRE_PROGRAMMED_DISTRIBUTIONS_KEY), Some(TokenPreProgrammedDistributionRoot)) + } + _ => (hex_to_ascii(key), None), + }, + KnownPath::TokenTimedDistributionRoot if key.len() == 1 => match key[0] { + tokens::paths::TOKEN_MS_TIMED_DISTRIBUTIONS_KEY => { + (format!("MillisecondTimedDistribution({})", tokens::paths::TOKEN_MS_TIMED_DISTRIBUTIONS_KEY), None) + } + tokens::paths::TOKEN_BLOCK_TIMED_DISTRIBUTIONS_KEY => { + (format!("BlockTimedDistribution({})", tokens::paths::TOKEN_BLOCK_TIMED_DISTRIBUTIONS_KEY), None) + } + tokens::paths::TOKEN_EPOCH_TIMED_DISTRIBUTIONS_KEY => { + (format!("EpochTimedDistribution({})", tokens::paths::TOKEN_EPOCH_TIMED_DISTRIBUTIONS_KEY), None) + } + _ => 
(hex_to_ascii(key), None), + }, + KnownPath::TokenPerpetualDistributionRoot if key.len() == 1 => match key[0] { + tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_INFO_KEY => { + (format!("PerpetualDistributionInfo({})", tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_INFO_KEY), None) + } + tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY => { + (format!("PerpetualDistributionFirstEvent({})", tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_FIRST_EVENT_KEY), None) + } + tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_FOR_IDENTITIES_LAST_CLAIM_KEY => { + (format!("PerpetualDistributionLastClaim({})", tokens::paths::TOKEN_PERPETUAL_DISTRIBUTIONS_FOR_IDENTITIES_LAST_CLAIM_KEY), None) + } + _ => (hex_to_ascii(key), None), + }, _ => (hex_to_ascii(key), None), } } diff --git a/packages/rs-drive/src/verify/state_transition/verify_state_transition_was_executed_with_proof/v0/mod.rs b/packages/rs-drive/src/verify/state_transition/verify_state_transition_was_executed_with_proof/v0/mod.rs index c52ae174cee..575cba03851 100644 --- a/packages/rs-drive/src/verify/state_transition/verify_state_transition_was_executed_with_proof/v0/mod.rs +++ b/packages/rs-drive/src/verify/state_transition/verify_state_transition_was_executed_with_proof/v0/mod.rs @@ -4,6 +4,7 @@ use dpp::block::block_info::BlockInfo; use dpp::data_contract::accessors::v0::DataContractV0Getters; use dpp::data_contract::accessors::v1::DataContractV1Getters; use dpp::data_contract::associated_token::token_configuration::accessors::v0::TokenConfigurationV0Getters; +use dpp::data_contract::associated_token::token_keeps_history_rules::accessors::v0::TokenKeepsHistoryRulesV0Getters; use dpp::data_contract::config::v0::DataContractConfigGettersV0; use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; use dpp::data_contract::serialized_version::DataContractInSerializationFormat; @@ -35,7 +36,6 @@ use dpp::state_transition::batch_transition::batched_transition::document_update use 
dpp::state_transition::batch_transition::batched_transition::token_transition::{TokenTransition, TokenTransitionV0Methods, TOKEN_HISTORY_ID_BYTES}; use dpp::state_transition::batch_transition::token_base_transition::v0::v0_methods::TokenBaseTransitionV0Methods; use dpp::state_transition::batch_transition::token_config_update_transition::v0::v0_methods::TokenConfigUpdateTransitionV0Methods; -use dpp::state_transition::batch_transition::token_destroy_frozen_funds_transition::v0::v0_methods::TokenDestroyFrozenFundsTransitionV0Methods; use dpp::state_transition::batch_transition::token_emergency_action_transition::v0::v0_methods::TokenEmergencyActionTransitionV0Methods; use dpp::state_transition::batch_transition::token_freeze_transition::v0::v0_methods::TokenFreezeTransitionV0Methods; use dpp::state_transition::batch_transition::token_mint_transition::v0::v0_methods::TokenMintTransitionV0Methods; @@ -43,7 +43,7 @@ use dpp::state_transition::batch_transition::token_transfer_transition::v0::v0_m use dpp::state_transition::batch_transition::token_unfreeze_transition::v0::v0_methods::TokenUnfreezeTransitionV0Methods; use dpp::state_transition::masternode_vote_transition::accessors::MasternodeVoteTransitionAccessorsV0; use dpp::state_transition::proof_result::StateTransitionProofResult; -use dpp::state_transition::proof_result::StateTransitionProofResult::{VerifiedBalanceTransfer, VerifiedDataContract, VerifiedDocuments, VerifiedIdentity, VerifiedMasternodeVote, VerifiedPartialIdentity, VerifiedTokenActionWithDocument, VerifiedTokenBalance, VerifiedTokenBalanceAbsence, VerifiedTokenIdentitiesBalances, VerifiedTokenIdentityInfo, VerifiedTokenStatus}; +use dpp::state_transition::proof_result::StateTransitionProofResult::{VerifiedBalanceTransfer, VerifiedDataContract, VerifiedDocuments, VerifiedIdentity, VerifiedMasternodeVote, VerifiedPartialIdentity, VerifiedTokenActionWithDocument, VerifiedTokenBalance, VerifiedTokenIdentitiesBalances, VerifiedTokenIdentityInfo, 
VerifiedTokenStatus}; use dpp::tokens::info::v0::IdentityTokenInfoV0Accessors; use dpp::tokens::status::v0::TokenStatusV0Accessors; use dpp::voting::vote_polls::VotePoll; @@ -316,7 +316,8 @@ impl Drive { token_transition.base().token_contract_position(), )?; let keeps_historical_document = token_config.keeps_history(); - if keeps_historical_document { + + let historical_query = || { let query = SingleDocumentDriveQuery { contract_id: TOKEN_HISTORY_ID_BYTES, document_type_name, @@ -353,9 +354,12 @@ impl Drive { return Err(Error::Proof(ProofError::IncorrectProof(format!("proof of state transition execution did not show the correct historical document {}, {}", document, expected_document)))); } Ok((root_hash, VerifiedTokenActionWithDocument(document))) - } else { - match token_transition { - TokenTransition::Burn(_) => { + }; + match token_transition { + TokenTransition::Burn(_) => { + if keeps_historical_document.keeps_burning_history() { + historical_query() + } else { let (root_hash, Some(balance)) = Drive::verify_token_balance_for_identity_id( proof, @@ -366,11 +370,15 @@ impl Drive { )? else { return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof did not contain token balance for identity {} expected to exist because of state transition (token burn)", owner_id)))); + format!("proof did not contain token balance for identity {} expected to exist because of state transition (token burn)", owner_id)))); }; Ok((root_hash, VerifiedTokenBalance(owner_id, balance))) } - TokenTransition::Mint(token_mint_transition) => { + } + TokenTransition::Mint(token_mint_transition) => { + if keeps_historical_document.keeps_minting_history() { + historical_query() + } else { let recipient_id = token_mint_transition.recipient_id(token_config)?; let (root_hash, Some(balance)) = @@ -383,11 +391,15 @@ impl Drive { )? 
else { return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof did not contain token balance for identity {} expected to exist because of state transition (token mint)", recipient_id)))); + format!("proof did not contain token balance for identity {} expected to exist because of state transition (token mint)", recipient_id)))); }; Ok((root_hash, VerifiedTokenBalance(recipient_id, balance))) } - TokenTransition::Transfer(token_transfer_transition) => { + } + TokenTransition::Transfer(token_transfer_transition) => { + if keeps_historical_document.keeps_transfer_history() { + historical_query() + } else { let recipient_id = token_transfer_transition.recipient_id(); let identity_ids = [owner_id.to_buffer(), recipient_id.to_buffer()]; @@ -403,14 +415,18 @@ impl Drive { )?; let balances = balances.into_iter().map(|(id, maybe_balance)| { - let balance = maybe_balance.ok_or(Error::Proof(ProofError::IncorrectProof( - format!("proof did not contain token balance for identity {} expected to exist because of state transition (token transfer)", id))))?; - Ok((id, balance)) - }).collect::>()?; + let balance = maybe_balance.ok_or(Error::Proof(ProofError::IncorrectProof( + format!("proof did not contain token balance for identity {} expected to exist because of state transition (token transfer)", id))))?; + Ok((id, balance)) + }).collect::>()?; Ok((root_hash, VerifiedTokenIdentitiesBalances(balances))) } - TokenTransition::Freeze(token_freeze_transition) => { + } + TokenTransition::Freeze(token_freeze_transition) => { + if keeps_historical_document.keeps_freezing_history() { + historical_query() + } else { let (root_hash, Some(identity_token_info)) = Drive::verify_token_info_for_identity_id( proof, @@ -423,18 +439,22 @@ impl Drive { )? 
else { return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof did not contain token info for identity {} expected to exist because of state transition (token freeze)", token_freeze_transition.frozen_identity_id())))); + format!("proof did not contain token info for identity {} expected to exist because of state transition (token freeze)", token_freeze_transition.frozen_identity_id())))); }; if !identity_token_info.frozen() { return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof contained token info saying this token was not frozen for identity {}", token_freeze_transition.frozen_identity_id())))); + format!("proof contained token info saying this token was not frozen for identity {}", token_freeze_transition.frozen_identity_id())))); } Ok(( root_hash, VerifiedTokenIdentityInfo(owner_id, identity_token_info), )) } - TokenTransition::Unfreeze(token_unfreeze_transition) => { + } + TokenTransition::Unfreeze(token_unfreeze_transition) => { + if keeps_historical_document.keeps_freezing_history() { + historical_query() + } else { let (root_hash, Some(identity_token_info)) = Drive::verify_token_info_for_identity_id( proof, @@ -447,95 +467,71 @@ impl Drive { )? 
else { return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof did not contain token info for identity {} expected to exist because of state transition (token freeze)", token_unfreeze_transition.frozen_identity_id())))); + format!("proof did not contain token info for identity {} expected to exist because of state transition (token freeze)", token_unfreeze_transition.frozen_identity_id())))); }; if identity_token_info.frozen() { return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof contained token info saying this token was frozen for identity {} when we just unfroze it", token_unfreeze_transition.frozen_identity_id())))); + format!("proof contained token info saying this token was frozen for identity {} when we just unfroze it", token_unfreeze_transition.frozen_identity_id())))); } Ok(( root_hash, VerifiedTokenIdentityInfo(owner_id, identity_token_info), )) } - TokenTransition::DestroyFrozenFunds( - destroy_frozen_funds_transition, - ) => { - let (root_hash, maybe_token_amount) = - Drive::verify_token_balance_for_identity_id( - proof, - token_id.into_buffer(), - destroy_frozen_funds_transition - .frozen_identity_id() - .into_buffer(), - false, - platform_version, - )?; - if maybe_token_amount != Some(0) { - return Err(Error::Proof(ProofError::IncorrectProof( - format!("proof contained non-zero token balance for identity {} expected to be zero because of state transition (token destroy frozen funds)", destroy_frozen_funds_transition.frozen_identity_id())))); - }; - Ok(( - root_hash, - VerifiedTokenBalanceAbsence( - destroy_frozen_funds_transition.frozen_identity_id(), - ), - )) - } - TokenTransition::EmergencyAction(emergency_action_transition) => { - let (root_hash, Some(token_status)) = - Drive::verify_token_status( - proof, - token_id.into_buffer(), - false, - platform_version, - )? 
- else { - return Err(Error::Proof(ProofError::IncorrectProof( + } + TokenTransition::DestroyFrozenFunds(_) => historical_query(), + TokenTransition::EmergencyAction(emergency_action_transition) => { + let (root_hash, Some(token_status)) = Drive::verify_token_status( + proof, + token_id.into_buffer(), + true, + platform_version, + )? + else { + return Err(Error::Proof(ProofError::IncorrectProof( "proof did not contain token status expected to exist because of state transition (token emergency action)".to_string()))); - }; - if token_status.paused() - != emergency_action_transition.emergency_action().paused() - { - return Err(Error::Proof(ProofError::IncorrectProof( + }; + if token_status.paused() + != emergency_action_transition.emergency_action().paused() + { + return Err(Error::Proof(ProofError::IncorrectProof( format!("proof contained token status saying this token is {}paused, but we expected {}paused", if token_status.paused() {""} else {"not "}, if emergency_action_transition.emergency_action().paused() {""} else {"not "})))); - } - Ok((root_hash, VerifiedTokenStatus(token_status))) } - TokenTransition::ConfigUpdate(update) => { - let (root_hash, Some(updated_contract)) = - Drive::verify_contract( - proof, - Some(contract.config().keeps_history()), - false, - false, - contract.id().into_buffer(), - platform_version, - )? - else { - return Err(Error::Proof(ProofError::IncorrectProof( + Ok((root_hash, VerifiedTokenStatus(token_status))) + } + TokenTransition::ConfigUpdate(update) => { + let (root_hash, Some(updated_contract)) = Drive::verify_contract( + proof, + Some(contract.config().keeps_history()), + true, + false, + contract.id().into_buffer(), + platform_version, + )? 
+ else { + return Err(Error::Proof(ProofError::IncorrectProof( "proof did not contain token status expected to exist because of state transition (token emergency action)".to_string()))); - }; - let mut expected_config = token_config.clone(); - expected_config.apply_token_configuration_item( - update.update_token_configuration_item().clone(), - ); - let new_token_config = updated_contract.expected_token_configuration( + }; + let mut expected_config = token_config.clone(); + expected_config.apply_token_configuration_item( + update.update_token_configuration_item().clone(), + ); + let new_token_config = updated_contract.expected_token_configuration( token_transition.base().token_contract_position(), ).map_err(|_| { Error::Proof(ProofError::CorruptedProof("returned proof does not have a token configuration, which should not be possible".to_string())) })?; - if new_token_config != &expected_config { - return Err(Error::Proof(ProofError::IncorrectProof( + if new_token_config != &expected_config { + return Err(Error::Proof(ProofError::IncorrectProof( format!( "expected token configuration does not match the token configuration from the proof: expected {}, found {}", expected_config, new_token_config )))); - } - Ok((root_hash, VerifiedDataContract(updated_contract))) } + Ok((root_hash, VerifiedDataContract(updated_contract))) } + TokenTransition::Claim(_) => historical_query(), } } } diff --git a/packages/rs-drive/tests/deterministic_root_hash.rs b/packages/rs-drive/tests/deterministic_root_hash.rs index 690fce96091..c8da8f8fb97 100644 --- a/packages/rs-drive/tests/deterministic_root_hash.rs +++ b/packages/rs-drive/tests/deterministic_root_hash.rs @@ -301,11 +301,16 @@ mod tests { // We expect a different app hash because data contract is not serialized the same way let expected_app_hash = match platform_version.protocol_version { - 0..7 => "1b80f4a9f00597b3f1ddca904b3cee67576868adcdd802c0a3f91e14209bb402", - _ => 
"387fe8e2298bb33e0ff79fd377eccb14109fb2534c7338c535bd74b5b8580580", + 0..=8 => "1b80f4a9f00597b3f1ddca904b3cee67576868adcdd802c0a3f91e14209bb402", + _ => "830cca9b7ffc3120c28301a8da4b64cac21b33555d59b6b09c867e3c4d4d0f92", }; - assert_eq!(hex::encode(app_hash), expected_app_hash); + assert_eq!( + hex::encode(app_hash), + expected_app_hash, + "not matching after contract insertion for protocol version {}", + platform_version.protocol_version + ); } /// Runs `test_root_hash_with_batches` 10 times. @@ -329,14 +334,14 @@ mod tests { /// Runs `test_root_hash_with_batches` 10 times. #[test] - fn test_deterministic_root_hash_with_batches_latest_platform_version() { + fn test_root_hash_with_batches_for_version() { let drive = setup_drive(None, None); - let platform_version = PlatformVersion::latest(); - let db_transaction = drive.grove.start_transaction(); - for _ in 0..10 { + for i in 1..=PlatformVersion::latest().protocol_version { + let platform_version = PlatformVersion::get(i).expect("expected platform version"); + test_root_hash_with_batches(&drive, &db_transaction, platform_version); drive diff --git a/packages/rs-drive/tests/query_tests.rs b/packages/rs-drive/tests/query_tests.rs index 8451908d687..bae51c85f92 100644 --- a/packages/rs-drive/tests/query_tests.rs +++ b/packages/rs-drive/tests/query_tests.rs @@ -1058,5975 +1058,5936 @@ pub fn setup_dpns_test_with_data(path: &str) -> (Drive, DataContract) { (drive, contract) } -#[cfg(feature = "server")] -#[test] -#[ignore] -fn test_query_many() { - let platform_version = PlatformVersion::latest(); - let (drive, contract) = setup_family_tests(1600, 73509, platform_version); - let db_transaction = drive.grove.start_transaction(); +#[cfg(test)] +mod tests { + use super::*; + #[cfg(feature = "server")] + #[test] + fn test_reference_proof_single_index() { + let (drive, contract) = setup_family_tests_only_first_name_index(1, 73509); - let platform_version = PlatformVersion::latest(); + let platform_version = 
PlatformVersion::latest(); - let people = Person::random_people(10, 73409); - for person in people { - let value = serde_json::to_value(person).expect("serialized person"); - let document_cbor = cbor_serializer::serializable_value_to_cbor(&value, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(document_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); - let document_type = contract - .document_type_for_name("person") - .expect("expected to get document type"); + let db_transaction = drive.grove.start_transaction(); - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); - } - drive - .grove - .commit_transaction(db_transaction) - .unwrap() - .expect("transaction should be committed"); -} + // A query getting all elements by firstName -#[cfg(feature = "server")] -#[test] -fn test_reference_proof_single_index() { - let (drive, contract) = setup_family_tests_only_first_name_index(1, 73509); + let query_value = json!({ + "where": [ + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + 
&contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - let platform_version = PlatformVersion::latest(); + #[cfg(feature = "server")] + #[test] + fn test_non_existence_reference_proof_single_index() { + let (drive, contract) = setup_family_tests_only_first_name_index(0, 73509); - let db_transaction = drive.grove.start_transaction(); + let platform_version = PlatformVersion::latest(); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - // A query getting all elements by firstName - - let query_value = json!({ - "where": [ - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + 
let db_transaction = drive.grove.start_transaction(); -#[cfg(feature = "server")] -#[test] -fn test_non_existence_reference_proof_single_index() { - let (drive, contract) = setup_family_tests_only_first_name_index(0, 73509); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); - let platform_version = PlatformVersion::latest(); + // A query getting all elements by firstName - let db_transaction = drive.grove.start_transaction(); + let query_value = json!({ + "where": [ + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - // A query getting all elements by firstName - - let query_value = json!({ - "where": [ - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - 
.document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + #[cfg(feature = "server")] + #[test] + fn test_family_basic_queries_first_version() { + let platform_version = PlatformVersion::first(); + let (drive, contract) = setup_family_tests(10, 73509, platform_version); -#[cfg(feature = "server")] -#[test] -fn test_family_basic_queries_first_version() { - let platform_version = PlatformVersion::first(); - let (drive, contract) = setup_family_tests(10, 73509, platform_version); + let db_transaction = drive.grove.start_transaction(); - let db_transaction = drive.grove.start_transaction(); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 32, 210, 24, 196, 148, 43, 20, 34, 0, 116, 183, 136, 32, 210, 163, 183, 214, 6, 152, + 86, 46, 45, 88, 13, 23, 41, 37, 70, 129, 119, 211, 12, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + let all_names = [ + "Adey".to_string(), + "Briney".to_string(), + "Cammi".to_string(), + "Celinda".to_string(), + "Dalia".to_string(), + "Gilligan".to_string(), + "Kevina".to_string(), + "Meta".to_string(), + "Noellyn".to_string(), + "Prissie".to_string(), + ]; + + // A query getting all elements by firstName + + let query_value = json!({ + "where": [ + ], + "limit": 100, + "orderBy": 
[ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 32, 210, 24, 196, 148, 43, 20, 34, 0, 116, 183, 136, 32, 210, 163, 183, 214, 6, 152, 86, - 46, 45, 88, 13, 23, 41, 37, 70, 129, 119, 211, 12, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - let all_names = [ - "Adey".to_string(), - "Briney".to_string(), - "Cammi".to_string(), - "Celinda".to_string(), - "Dalia".to_string(), - "Gilligan".to_string(), - "Kevina".to_string(), - "Meta".to_string(), - "Noellyn".to_string(), - "Prissie".to_string(), - ]; - - // A query getting all elements by firstName - - let query_value = json!({ - "where": [ - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - 
.expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); + assert_eq!(names, all_names); - assert_eq!(names, all_names); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + // A query getting all people who's first name is Adey (which should exist) + let query_value = json!({ + "where": [ + ["firstName", "==", "Adey"] + ] + }); - // A query getting all people who's first name is Adey (which should exist) - let query_value = json!({ - "where": [ - ["firstName", "==", "Adey"] - ] - }); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let query_cbor = 
cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(results.len(), 1); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(results.len(), 1); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + // A query getting all people who's first name is Adey and lastName Randolf - // A query getting all people who's first name is Adey and lastName Randolf + let query_value = json!({ + "where": [ + ["firstName", "==", "Adey"], + ["lastName", "==", "Randolf"] + ], + }); - let query_value = json!({ - "where": [ - ["firstName", "==", "Adey"], - ["lastName", "==", 
"Randolf"] - ], - }); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + assert_eq!(results.len(), 1); - assert_eq!(results.len(), 1); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, + let document = Document::from_bytes( + results.first().unwrap().as_slice(), person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), + platform_version, ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + .expect("we should be able to deserialize from bytes"); + let last_name = document + .get("lastName") 
+ .expect("we should be able to get the last name") + .as_text() + .expect("last name must be a string"); - let document = Document::from_bytes( - results.first().unwrap().as_slice(), - person_document_type, - platform_version, - ) - .expect("we should be able to deserialize from bytes"); - let last_name = document - .get("lastName") - .expect("we should be able to get the last name") - .as_text() - .expect("last name must be a string"); - - assert_eq!(last_name, "Randolf"); - - // A query getting all people who's first name is in a range with a single element Adey, - // order by lastName (this should exist) - - let query_value = json!({ - "where": [ - ["firstName", "in", ["Adey"]] - ], - "orderBy": [ - ["firstName", "asc"], - ["lastName", "asc"] - ] - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(last_name, "Randolf"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name is in a range with a single element Adey, + // order by lastName (this should exist) - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_value = json!({ + "where": [ + ["firstName", "in", ["Adey"]] + ], + "orderBy": [ + ["firstName", "asc"], + ["lastName", "asc"] + ] + }); - assert_eq!(results.len(), 1); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - 
.expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name is Adey, order by lastName (which should exist) - - let query_value = json!({ - "where": [ - ["firstName", "==", "Adey"] - ], - "orderBy": [ - ["lastName", "asc"] - ] - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + assert_eq!(results.len(), 1); - assert_eq!(results.len(), 1); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + 
// A query getting all people who's first name is Adey, order by lastName (which should exist) - let document = Document::from_bytes( - results.first().unwrap().as_slice(), - person_document_type, - platform_version, - ) - .expect("we should be able to deserialize from bytes"); - let last_name = document - .get("lastName") - .expect("we should be able to get the last name") - .as_text() - .expect("last name must be a string"); + let query_value = json!({ + "where": [ + ["firstName", "==", "Adey"] + ], + "orderBy": [ + ["lastName", "asc"] + ] + }); - assert_eq!(last_name, "Randolf"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - // A query getting all people who's first name is Chris (which is not exist) + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let query_value = json!({ - "where": [ - ["firstName", "==", "Chris"] - ] - }); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(results.len(), 1); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, + let document = 
Document::from_bytes( + results.first().unwrap().as_slice(), person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), + platform_version, ) - .expect("query should be executed"); + .expect("we should be able to deserialize from bytes"); + let last_name = document + .get("lastName") + .expect("we should be able to get the last name") + .as_text() + .expect("last name must be a string"); - assert_eq!(results.len(), 0); + assert_eq!(last_name, "Randolf"); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + // A query getting all people who's first name is Chris (which is not exist) - // A query getting a middle name + let query_value = json!({ + "where": [ + ["firstName", "==", "Chris"] + ] + }); - let query_value = json!({ - "where": [ - ["middleName", "==", "Briggs"] - ] - }); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - 
person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - - assert_eq!(results.len(), 1); - - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name is before Chris - - let query_value = json!({ - "where": [ - ["firstName", "<", "Chris"] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_before_chris = [ - "Adey".to_string(), - "Briney".to_string(), - "Cammi".to_string(), - "Celinda".to_string(), - ]; - assert_eq!(names, expected_names_before_chris); - - let 
(proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name starts with C - - let query_value = json!({ - "where": [ - ["firstName", "StartsWith", "C"] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_starting_with_c = ["Cammi".to_string(), "Celinda".to_string()]; - assert_eq!(names, expected_names_starting_with_c); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name starts with C, but limit to 1 and be descending - - let query_value = 
json!({ - "where": [ - ["firstName", "StartsWith", "C"] - ], - "limit": 1, - "orderBy": [ - ["firstName", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_starting_with_c_desc_1 = ["Celinda".to_string()]; - assert_eq!(names, expected_names_starting_with_c_desc_1); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name is between Chris and Noellyn included - - let query_value = json!({ - "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - 
.document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - assert_eq!(results.len(), 5); - - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_between_names = [ - "Dalia".to_string(), - "Gilligan".to_string(), - "Kevina".to_string(), - "Meta".to_string(), - "Noellyn".to_string(), - ]; - - assert_eq!(names, expected_between_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting back elements having specific names - - let query_value = json!({ - "where": [ - ["firstName", "in", names] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) 
= query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - assert_eq!(names, expected_between_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let query_value = json!({ - "where": [ - ["firstName", "in", names] - ], - "limit": 100, - "orderBy": [ - ["firstName", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a 
string"); - String::from(first_name) - }) - .collect(); - - let expected_reversed_between_names = [ - "Noellyn".to_string(), - "Meta".to_string(), - "Kevina".to_string(), - "Gilligan".to_string(), - "Dalia".to_string(), - ]; - - assert_eq!(names, expected_reversed_between_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting back elements having specific names and over a certain age - - let query_value = json!({ - "where": [ - ["firstName", "in", names], - ["age", ">=", 45] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"], - ["age", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_45_over = [ - "Dalia".to_string(), - "Gilligan".to_string(), - "Kevina".to_string(), - "Meta".to_string(), - ]; - - assert_eq!(names, 
expected_names_45_over); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting back elements having specific names and over a certain age - - let query_value = json!({ - "where": [ - ["firstName", "in", names], - ["age", ">", 48] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"], - ["age", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - // Kevina is 48 so she should be now excluded, Dalia is 68, Gilligan is 49 and Meta is 59 - - let expected_names_over_48 = [ - "Dalia".to_string(), - "Gilligan".to_string(), - "Meta".to_string(), - ]; - - assert_eq!(names, expected_names_over_48); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - 
assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let ages: HashMap = results - .into_iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let name = name_value - .as_text() - .expect("the first name should be a string") - .to_string(); - let age_value = document - .get("age") - .expect("we should be able to get the age"); - let age: u8 = age_value.to_integer().expect("expected u8 value"); - (name, age) - }) - .collect(); - - let meta_age = ages - .get("Meta") - .expect("we should be able to get Kevina as she is 48"); - - assert_eq!(*meta_age, 59); - - // fetching by $id - let mut rng = rand::rngs::StdRng::seed_from_u64(84594); - let id_bytes = bs58::decode("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD") - .into_vec() - .expect("this should decode"); - - let owner_id_bytes = bs58::decode("BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj") - .into_vec() - .expect("this should decode"); - - let fixed_person = Person { - id: id_bytes, - owner_id: owner_id_bytes, - first_name: String::from("Wisdom"), - middle_name: String::from("Madabuchukwu"), - last_name: String::from("Ogwu"), - age: rng.gen_range(0..85), - }; - let serialized_person = serde_json::to_value(fixed_person).expect("serialized person"); - let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + assert_eq!(results.len(), 0); - let document_type = contract - .document_type_for_name("person") - .expect("expected to get document type"); + let (proof_root_hash, proof_results, _) = drive + 
.query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + // A query getting a middle name - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); - - let id_two_bytes = bs58::decode("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179") - .into_vec() - .expect("should decode"); - let owner_id_bytes = bs58::decode("Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA") - .into_vec() - .expect("this should decode"); - let next_person = Person { - id: id_two_bytes, - owner_id: owner_id_bytes, - first_name: String::from("Wdskdfslgjfdlj"), - middle_name: String::from("Mdsfdsgsdl"), - last_name: String::from("dkfjghfdk"), - age: rng.gen_range(0..85), - }; - let serialized_person = serde_json::to_value(next_person).expect("serialized person"); - let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + let query_value = json!({ + "where": [ + ["middleName", "==", "Briggs"] + ] + }); - let document_type = contract - .document_type_for_name("person") - .expect("expected to get document type"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let 
storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_value = json!({ - "where": [ - ["$id", "in", vec![String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - }); + assert_eq!(results.len(), 1); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name is before Chris - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", "<", "Chris"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to 
cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); - - assert_eq!(results.len(), 1); - - // TODO: Add test for proofs after transaction - // drive.grove.commit_transaction(db_transaction).expect("unable to commit transaction"); - // let (proof_root_hash, proof_results) = drive - // .query_documents_from_contract_as_grove_proof_only_get_elements( - // &contract, - // person_document_type, - // query_cbor.as_slice(), - // None, - // ) - // .expect("query should be executed"); - // assert_eq!(root_hash, proof_root_hash); - // assert_eq!(results, proof_results); - // let db_transaction = drive.grove.start_transaction(); - - // fetching by $id with order by - - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "orderBy": [["$id", "asc"]], - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + 
.collect(); + + let expected_names_before_chris = [ + "Adey".to_string(), + "Briney".to_string(), + "Cammi".to_string(), + "Celinda".to_string(), + ]; + assert_eq!(names, expected_names_before_chris); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name starts with C - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", "StartsWith", "C"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); - - assert_eq!(results.len(), 2); - - let last_person = Document::from_bytes( - results.first().unwrap().as_slice(), - document_type, - platform_version, - ) - .expect("we should be able to deserialize the document"); - - assert_eq!( - last_person.id().to_vec(), - vec![ - 76, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, 189, - 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, - ] - ); - - // fetching by $id with order by desc + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + 
.expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "orderBy": [["$id", "desc"]], - }); + let expected_names_starting_with_c = ["Cammi".to_string(), "Celinda".to_string()]; + assert_eq!(names, expected_names_starting_with_c); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name starts with C, but limit to 1 and be descending - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", "StartsWith", "C"] + ], + "limit": 1, + "orderBy": [ + ["firstName", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + 
where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); - - assert_eq!(results.len(), 2); - - let last_person = Document::from_bytes( - results.first().unwrap().as_slice(), - document_type, - platform_version, - ) - .expect("we should be able to deserialize the document"); - - assert_eq!( - last_person.id().to_vec(), - vec![ - 140, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, - 189, 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, - ] - ); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - // - // // fetching with empty where and orderBy - // - let query_value = json!({}); + let expected_names_starting_with_c_desc_1 = ["Celinda".to_string()]; + assert_eq!(names, expected_names_starting_with_c_desc_1); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person 
document type"); + // A query getting all people who's first name is between Chris and Noellyn included - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + assert_eq!(results.len(), 5); + + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - assert_eq!(results.len(), 12); + let expected_between_names = [ + "Dalia".to_string(), + "Gilligan".to_string(), + "Kevina".to_string(), + "Meta".to_string(), + "Noellyn".to_string(), + ]; - // - // // fetching with empty where and orderBy $id desc - // - let query_value = json!({ - "orderBy": [["$id", "desc"]] - }); + assert_eq!(names, expected_between_names); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - 
.expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting back elements having specific names - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", "in", names] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); - - assert_eq!(results.len(), 12); - - let last_person = Document::from_bytes( - results.first().unwrap().as_slice(), - document_type, - platform_version, - ) - .expect("we should be able to deserialize the document"); - - assert_eq!( - last_person.id().to_vec(), - vec![ - 249, 170, 70, 122, 181, 31, 35, 176, 175, 131, 70, 150, 250, 223, 194, 203, 175, 200, - 107, 252, 199, 227, 154, 105, 89, 57, 38, 85, 236, 192, 254, 88, - ] - ); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be 
able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - // - // // fetching with ownerId in a set of values - // - let query_value = json!({ - "where": [ - ["$ownerId", "in", ["BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj", "Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA"]] - ], - "orderBy": [["$ownerId", "desc"]] - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(names, expected_between_names); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", "in", names] + ], + "limit": 100, + "orderBy": [ + ["firstName", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should 
be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - assert_eq!(results.len(), 2); + let expected_reversed_between_names = [ + "Noellyn".to_string(), + "Meta".to_string(), + "Kevina".to_string(), + "Gilligan".to_string(), + "Dalia".to_string(), + ]; - // - // // fetching with ownerId equal and orderBy - // - let query_value = json!({ - "where": [ - ["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"] - ], - "orderBy": [["$ownerId", "asc"]] - }); + assert_eq!(names, expected_reversed_between_names); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting back elements having specific names and over a certain age - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let query_value = json!({ + "where": [ + ["firstName", "in", names], + ["age", ">=", 45] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"], + ["age", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + 
.expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - assert_eq!(results.len(), 1); + let expected_names_45_over = [ + "Dalia".to_string(), + "Gilligan".to_string(), + "Kevina".to_string(), + "Meta".to_string(), + ]; - // query empty contract with nested path queries + assert_eq!(names, expected_names_45_over); - let dashpay_contract = json_document_to_contract( - "tests/supporting_files/contract/dashpay/dashpay-contract.json", - false, - platform_version, - ) - .expect("expected to get cbor document"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - drive - .apply_contract( - &dashpay_contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, + // A query getting back elements having specific names and over a certain age + + let query_value = json!({ + "where": [ + ["firstName", "in", names], + ["age", ">", 48] + ], + 
"limit": 100, + "orderBy": [ + ["firstName", "asc"], + ["age", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, ) - .expect("expected to apply contract successfully"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); + + // Kevina is 48 so she should be now excluded, Dalia is 68, Gilligan is 49 and Meta is 59 + + let expected_names_over_48 = [ + "Dalia".to_string(), + "Gilligan".to_string(), + "Meta".to_string(), + ]; + + assert_eq!(names, expected_names_over_48); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + let ages: HashMap = results + .into_iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let 
name = name_value + .as_text() + .expect("the first name should be a string") + .to_string(); + let age_value = document + .get("age") + .expect("we should be able to get the age"); + let age: u8 = age_value.to_integer().expect("expected u8 value"); + (name, age) + }) + .collect(); + + let meta_age = ages + .get("Meta") + .expect("we should be able to get Kevina as she is 48"); + + assert_eq!(*meta_age, 59); + + // fetching by $id + let mut rng = rand::rngs::StdRng::seed_from_u64(84594); + let id_bytes = bs58::decode("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD") + .into_vec() + .expect("this should decode"); + + let owner_id_bytes = bs58::decode("BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj") + .into_vec() + .expect("this should decode"); + + let fixed_person = Person { + id: id_bytes, + owner_id: owner_id_bytes, + first_name: String::from("Wisdom"), + middle_name: String::from("Madabuchukwu"), + last_name: String::from("Ogwu"), + age: rng.gen_range(0..85), + }; + let serialized_person = serde_json::to_value(fixed_person).expect("serialized person"); + let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) + .expect("expected to serialize to cbor"); + let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); - let query_value = json!({ - "where": [ - ["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], - ["toUserId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], - ], - }); + let document_type = contract + .document_type_for_name("person") + .expect("expected to get document type"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &dashpay_contract, - dashpay_contract - 
.document_type_for_name("contactRequest") - .expect("should have contact document type"), - &query_cbor, - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); - assert_eq!(results.len(), 0); + let id_two_bytes = bs58::decode("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179") + .into_vec() + .expect("should decode"); + let owner_id_bytes = bs58::decode("Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA") + .into_vec() + .expect("this should decode"); + let next_person = Person { + id: id_two_bytes, + owner_id: owner_id_bytes, + first_name: String::from("Wdskdfslgjfdlj"), + middle_name: String::from("Mdsfdsgsdl"), + last_name: String::from("dkfjghfdk"), + age: rng.gen_range(0..85), + }; + let serialized_person = serde_json::to_value(next_person).expect("serialized person"); + let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) + .expect("expected to serialize to cbor"); + let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); - // using non existing document in startAt + let document_type = contract + .document_type_for_name("person") + .expect("expected to get document type"); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("5A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178")]], - ], - "orderBy": [["$id", "asc"]], - }); + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - let query_cbor = 
cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let query_value = json!({ + "where": [ + ["$id", "in", vec![String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + }); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - assert_eq!(results.len(), 1); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - // using non existing document in startAt + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + + assert_eq!(results.len(), 1); + + // TODO: Add test for proofs after transaction + // drive.grove.commit_transaction(db_transaction).expect("unable to commit transaction"); + // let (proof_root_hash, proof_results) = drive + // .query_documents_from_contract_as_grove_proof_only_get_elements( + // &contract, + // person_document_type, + // query_cbor.as_slice(), + // None, + // ) + // .expect("query 
should be executed"); + // assert_eq!(root_hash, proof_root_hash); + // assert_eq!(results, proof_results); + // let db_transaction = drive.grove.start_transaction(); + + // fetching by $id with order by + + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "orderBy": [["$id", "asc"]], + }); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "startAt": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), - "orderBy": [["$id", "asc"]], - }); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let result = drive.query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ); + assert_eq!(results.len(), 2); - assert!( - matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAt document not found") - ); + let last_person = Document::from_bytes( + results.first().unwrap().as_slice(), + document_type, + platform_version, + 
) + .expect("we should be able to deserialize the document"); - // using non existing document in startAfter + assert_eq!( + last_person.id().to_vec(), + vec![ + 76, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, + 189, 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, + ] + ); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "startAfter": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), - "orderBy": [["$id", "asc"]], - }); + // fetching by $id with order by desc - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "orderBy": [["$id", "desc"]], + }); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let result = drive.query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - assert!( - matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAfter document not found") - ); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be 
executed"); - // validate eventual root hash + assert_eq!(results.len(), 2); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - assert_eq!( - root_hash.as_slice(), - vec![ - 251, 69, 177, 93, 128, 236, 106, 87, 205, 123, 80, 61, 44, 107, 186, 193, 22, 192, 239, - 7, 107, 110, 97, 197, 59, 245, 26, 12, 63, 91, 248, 231 - ], - ); -} + let last_person = Document::from_bytes( + results.first().unwrap().as_slice(), + document_type, + platform_version, + ) + .expect("we should be able to deserialize the document"); -#[cfg(feature = "server")] -#[test] -fn test_family_basic_queries() { - let platform_version = PlatformVersion::latest(); - let (drive, contract) = setup_family_tests(10, 73509, platform_version); + assert_eq!( + last_person.id().to_vec(), + vec![ + 140, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, + 189, 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, + ] + ); - let db_transaction = drive.grove.start_transaction(); + // + // // fetching with empty where and orderBy + // + let query_value = json!({}); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 63, 90, 74, 129, 70, 204, 232, 67, 190, 85, 133, 79, 254, 245, 203, 180, 77, 67, 94, 22, - 180, 99, 51, 251, 82, 117, 211, 14, 136, 51, 228, 177, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - let all_names = [ - "Adey".to_string(), - "Briney".to_string(), - "Cammi".to_string(), - "Celinda".to_string(), - "Dalia".to_string(), - "Gilligan".to_string(), - "Kevina".to_string(), - "Meta".to_string(), - "Noellyn".to_string(), - "Prissie".to_string(), - ]; - - // A query getting all elements by firstName - - let query_value = json!({ - "where": [ - ], - "limit": 100, - "orderBy": [ - ["firstName", 
"asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - assert_eq!(names, all_names); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - // A query getting all people who's first name is Adey (which should exist) - let query_value = json!({ - "where": [ - ["firstName", "==", 
"Adey"] - ] - }); + assert_eq!(results.len(), 12); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + // + // // fetching with empty where and orderBy $id desc + // + let query_value = json!({ + "orderBy": [["$id", "desc"]] + }); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(results.len(), 1); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + + assert_eq!(results.len(), 12); + + let last_person = Document::from_bytes( + results.first().unwrap().as_slice(), + document_type, + platform_version, ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + .expect("we should be able to deserialize the document"); - // A query getting all people who's first name is Adey and lastName Randolf + assert_eq!( + last_person.id().to_vec(), + vec![ + 249, 170, 70, 122, 181, 31, 35, 176, 175, 131, 70, 150, 250, 223, 194, 203, 
175, + 200, 107, 252, 199, 227, 154, 105, 89, 57, 38, 85, 236, 192, 254, 88, + ] + ); - let query_value = json!({ - "where": [ - ["firstName", "==", "Adey"], - ["lastName", "==", "Randolf"] - ], - }); + // + // // fetching with ownerId in a set of values + // + let query_value = json!({ + "where": [ + ["$ownerId", "in", ["BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj", "Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA"]] + ], + "orderBy": [["$ownerId", "desc"]] + }); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - assert_eq!(results.len(), 1); + assert_eq!(results.len(), 2); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + // + // // fetching with ownerId equal and orderBy + // + let query_value = json!({ + "where": [ + 
["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"] + ], + "orderBy": [["$ownerId", "asc"]] + }); - let document = Document::from_bytes( - results.first().unwrap().as_slice(), - person_document_type, - platform_version, - ) - .expect("we should be able to deserialize from bytes"); - let last_name = document - .get("lastName") - .expect("we should be able to get the last name") - .as_text() - .expect("last name must be a string"); - - assert_eq!(last_name, "Randolf"); - - // A query getting all people who's first name is in a range with a single element Adey, - // order by lastName (this should exist) - - let query_value = json!({ - "where": [ - ["firstName", "in", ["Adey"]] - ], - "orderBy": [ - ["firstName", "asc"], - ["lastName", "asc"] - ] - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - assert_eq!(results.len(), 1); + assert_eq!(results.len(), 1); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, 
- query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), + // query empty contract with nested path queries + + let dashpay_contract = json_document_to_contract( + "tests/supporting_files/contract/dashpay/dashpay-contract.json", + false, + platform_version, ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name is Adey, order by lastName (which should exist) - - let query_value = json!({ - "where": [ - ["firstName", "==", "Adey"] - ], - "orderBy": [ - ["lastName", "asc"] - ] - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + .expect("expected to get cbor document"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + drive + .apply_contract( + &dashpay_contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_value = json!({ + "where": [ + ["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], + ["toUserId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], + ], + }); - assert_eq!(results.len(), 1); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should 
be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &dashpay_contract, + dashpay_contract + .document_type_for_name("contactRequest") + .expect("should have contact document type"), + &query_cbor, + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let document = Document::from_bytes( - results.first().unwrap().as_slice(), - person_document_type, - platform_version, - ) - .expect("we should be able to deserialize from bytes"); - let last_name = document - .get("lastName") - .expect("we should be able to get the last name") - .as_text() - .expect("last name must be a string"); + assert_eq!(results.len(), 0); + + // using non existing document in startAt + + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("5A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178")]], + ], + "orderBy": [["$id", "asc"]], + }); - assert_eq!(last_name, "Randolf"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - // A query getting all people who's first name is Chris (which is not exist) + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let query_value = json!({ - "where": [ - ["firstName", "==", "Chris"] - ] - }); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(results.len(), 1); - let person_document_type = contract - 
.document_type_for_name("person") - .expect("contract should have a person document type"); + // using non existing document in startAt - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "startAt": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), + "orderBy": [["$id", "asc"]], + }); + + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - assert_eq!(results.len(), 0); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + let result = drive.query_documents_cbor_from_contract( &contract, person_document_type, query_cbor.as_slice(), None, - None, + Some(&db_transaction), Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + ); - // A query getting a middle name + assert!( + matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAt document not found") + ); - let query_value = json!({ - "where": [ - ["middleName", "==", "Briggs"] - ] - }); + // using non existing document in startAfter - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), 
String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "startAfter": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), + "orderBy": [["$id", "asc"]], + }); + + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let result = drive.query_documents_cbor_from_contract( &contract, person_document_type, query_cbor.as_slice(), None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - - assert_eq!(results.len(), 1); - - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name is before Chris - - let query_value = json!({ - "where": [ - ["firstName", "<", "Chris"] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - 
.expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_before_chris = [ - "Adey".to_string(), - "Briney".to_string(), - "Cammi".to_string(), - "Celinda".to_string(), - ]; - assert_eq!(names, expected_names_before_chris); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name starts with C - - let query_value = json!({ - "where": [ - ["firstName", "StartsWith", "C"] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we 
should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_starting_with_c = ["Cammi".to_string(), "Celinda".to_string()]; - assert_eq!(names, expected_names_starting_with_c); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name starts with C, but limit to 1 and be descending - - let query_value = json!({ - "where": [ - ["firstName", "StartsWith", "C"] - ], - "limit": 1, - "orderBy": [ - ["firstName", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_starting_with_c_desc_1 = ["Celinda".to_string()]; - assert_eq!(names, expected_names_starting_with_c_desc_1); - - let 
(proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all people who's first name is between Chris and Noellyn included - - let query_value = json!({ - "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - assert_eq!(results.len(), 5); - - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_between_names = [ - "Dalia".to_string(), - "Gilligan".to_string(), - "Kevina".to_string(), - "Meta".to_string(), - "Noellyn".to_string(), - ]; - - assert_eq!(names, expected_between_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - 
assert_eq!(results, proof_results); - - // A query getting back elements having specific names - - let query_value = json!({ - "where": [ - ["firstName", "in", names] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - assert_eq!(names, expected_between_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let query_value = json!({ - "where": [ - ["firstName", "in", names] - ], - "limit": 100, - "orderBy": [ - ["firstName", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = 
DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_reversed_between_names = [ - "Noellyn".to_string(), - "Meta".to_string(), - "Kevina".to_string(), - "Gilligan".to_string(), - "Dalia".to_string(), - ]; - - assert_eq!(names, expected_reversed_between_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting back elements having specific names and over a certain age - - let query_value = json!({ - "where": [ - ["firstName", "in", names], - ["age", ">=", 45] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"], - ["age", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, 
platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_names_45_over = [ - "Dalia".to_string(), - "Gilligan".to_string(), - "Kevina".to_string(), - "Meta".to_string(), - ]; - - assert_eq!(names, expected_names_45_over); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting back elements having specific names and over a certain age - - let query_value = json!({ - "where": [ - ["firstName", "in", names], - ["age", ">", 48] - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"], - ["age", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - 
let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - // Kevina is 48 so she should be now excluded, Dalia is 68, Gilligan is 49 and Meta is 59 - - let expected_names_over_48 = [ - "Dalia".to_string(), - "Gilligan".to_string(), - "Meta".to_string(), - ]; - - assert_eq!(names, expected_names_over_48); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let ages: HashMap = results - .into_iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let name = name_value - .as_text() - .expect("the first name should be a string") - .to_string(); - let age_value = document - .get("age") - .expect("we should be able to get the age"); - let age: u8 = age_value.to_integer().expect("expected u8 value"); - (name, age) - }) - .collect(); - - let meta_age = ages - .get("Meta") - .expect("we should be able to get Kevina as she is 48"); - - assert_eq!(*meta_age, 59); - - // fetching by $id - let mut rng = rand::rngs::StdRng::seed_from_u64(84594); - let id_bytes = bs58::decode("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD") - .into_vec() - .expect("this should decode"); - - let owner_id_bytes = bs58::decode("BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj") - .into_vec() - .expect("this should decode"); - - let fixed_person = Person { - id: id_bytes, - owner_id: owner_id_bytes, - first_name: String::from("Wisdom"), - middle_name: String::from("Madabuchukwu"), - 
last_name: String::from("Ogwu"), - age: rng.gen_range(0..85), - }; - let serialized_person = serde_json::to_value(fixed_person).expect("serialized person"); - let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); - - let document_type = contract - .document_type_for_name("person") - .expect("expected to get document type"); - - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); - - let id_two_bytes = bs58::decode("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179") - .into_vec() - .expect("should decode"); - let owner_id_bytes = bs58::decode("Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA") - .into_vec() - .expect("this should decode"); - let next_person = Person { - id: id_two_bytes, - owner_id: owner_id_bytes, - first_name: String::from("Wdskdfslgjfdlj"), - middle_name: String::from("Mdsfdsgsdl"), - last_name: String::from("dkfjghfdk"), - age: rng.gen_range(0..85), - }; - let serialized_person = serde_json::to_value(next_person).expect("serialized person"); - let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + Some(platform_version.protocol_version), + ); - let document_type = contract - .document_type_for_name("person") - 
.expect("expected to get document type"); + assert!( + matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAfter document not found") + ); - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + // validate eventual root hash - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); - let query_value = json!({ - "where": [ - ["$id", "in", vec![String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - }); + assert_eq!( + root_hash.as_slice(), + vec![ + 251, 69, 177, 93, 128, 236, 106, 87, 205, 123, 80, 61, 44, 107, 186, 193, 22, 192, + 239, 7, 107, 110, 97, 197, 59, 245, 26, 12, 63, 91, 248, 231 + ], + ); + } - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + #[cfg(feature = "server")] + #[test] + fn test_family_basic_queries() { + let platform_version = PlatformVersion::latest(); + let (drive, contract) = setup_family_tests(10, 73509, platform_version); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let db_transaction = drive.grove.start_transaction(); - let (results, _, _) = drive - .query_documents_cbor_from_contract( + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 59, 253, 119, 177, 148, 100, 153, 
121, 228, 238, 250, 185, 103, 53, 113, 8, 30, 192, + 75, 150, 153, 2, 24, 109, 93, 91, 97, 75, 106, 35, 29, 252, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + let all_names = [ + "Adey".to_string(), + "Briney".to_string(), + "Cammi".to_string(), + "Celinda".to_string(), + "Dalia".to_string(), + "Gilligan".to_string(), + "Kevina".to_string(), + "Meta".to_string(), + "Noellyn".to_string(), + "Prissie".to_string(), + ]; + + // A query getting all elements by firstName + + let query_value = json!({ + "where": [ + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + &drive.config, ) - .expect("query should be executed"); - - assert_eq!(results.len(), 1); - - // TODO: Add test for proofs after transaction - // drive.grove.commit_transaction(db_transaction).expect("unable to commit transaction"); - // let (proof_root_hash, proof_results) = drive - // .query_documents_from_contract_as_grove_proof_only_get_elements( - // &contract, - // person_document_type, - // query_cbor.as_slice(), - // None, - // ) - // .expect("query should be executed"); - // assert_eq!(root_hash, proof_root_hash); - // assert_eq!(results, proof_results); - // let db_transaction = drive.grove.start_transaction(); - - // fetching by $id with order by - - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "orderBy": [["$id", "asc"]], - }); - - let query_cbor = 
cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + assert_eq!(names, all_names); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - assert_eq!(results.len(), 2); + // A query getting all people who's first name is Adey (which should exist) + let query_value = json!({ + "where": [ + ["firstName", "==", "Adey"] + ] + }); - let last_person = Document::from_bytes( - results.first().unwrap().as_slice(), - document_type, - platform_version, - ) - .expect("we should be able to deserialize the document"); - - assert_eq!( - last_person.id().to_vec(), - vec![ - 76, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, 189, - 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 
45, 20, - ] - ); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - // fetching by $id with order by desc + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "orderBy": [["$id", "desc"]], - }); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(results.len(), 1); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name is Adey and lastName Randolf - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_value = json!({ + "where": [ + ["firstName", "==", "Adey"], + ["lastName", "==", "Randolf"] + ], + }); - assert_eq!(results.len(), 2); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + 
.expect("expected to serialize to cbor"); - let last_person = Document::from_bytes( - results.first().unwrap().as_slice(), - document_type, - platform_version, - ) - .expect("we should be able to deserialize the document"); - - assert_eq!( - last_person.id().to_vec(), - vec![ - 140, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, - 189, 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, - ] - ); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - // - // // fetching with empty where and orderBy - // - let query_value = json!({}); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(results.len(), 1); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, + let document = Document::from_bytes( + results.first().unwrap().as_slice(), person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + platform_version, ) - .expect("query should be executed"); + .expect("we should be able to deserialize from bytes"); + let last_name = document + .get("lastName") + 
.expect("we should be able to get the last name") + .as_text() + .expect("last name must be a string"); - assert_eq!(results.len(), 12); + assert_eq!(last_name, "Randolf"); - // - // // fetching with empty where and orderBy $id desc - // - let query_value = json!({ - "orderBy": [["$id", "desc"]] - }); + // A query getting all people who's first name is in a range with a single element Adey, + // order by lastName (this should exist) - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let query_value = json!({ + "where": [ + ["firstName", "in", ["Adey"]] + ], + "orderBy": [ + ["firstName", "asc"], + ["lastName", "asc"] + ] + }); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - assert_eq!(results.len(), 12); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let last_person = Document::from_bytes( - results.first().unwrap().as_slice(), - document_type, - platform_version, - ) - .expect("we should be able to deserialize the document"); - - assert_eq!( - last_person.id().to_vec(), - vec![ - 249, 170, 70, 122, 181, 31, 35, 176, 175, 131, 70, 150, 250, 223, 194, 203, 175, 200, - 107, 252, 199, 227, 154, 105, 89, 57, 38, 85, 236, 
192, 254, 88, - ] - ); + assert_eq!(results.len(), 1); - // - // // fetching with ownerId in a set of values - // - let query_value = json!({ - "where": [ - ["$ownerId", "in", ["BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj", "Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA"]] - ], - "orderBy": [["$ownerId", "desc"]] - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name is Adey, order by lastName (which should exist) - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_value = json!({ + "where": [ + ["firstName", "==", "Adey"] + ], + "orderBy": [ + ["lastName", "asc"] + ] + }); + + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - assert_eq!(results.len(), 2); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - // - // // fetching with ownerId equal and orderBy - // - let query_value = json!({ - "where": [ - ["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"] - ], - "orderBy": [["$ownerId", "asc"]] - }); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + 
&contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(results.len(), 1); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, + let document = Document::from_bytes( + results.first().unwrap().as_slice(), person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), + platform_version, ) - .expect("query should be executed"); + .expect("we should be able to deserialize from bytes"); + let last_name = document + .get("lastName") + .expect("we should be able to get the last name") + .as_text() + .expect("last name must be a string"); - assert_eq!(results.len(), 1); + assert_eq!(last_name, "Randolf"); - // query empty contract with nested path queries + // A query getting all people who's first name is Chris (which is not exist) - let dashpay_contract = json_document_to_contract( - "tests/supporting_files/contract/dashpay/dashpay-contract.json", - false, - platform_version, - ) - .expect("expected to get cbor document"); - - drive - .apply_contract( - &dashpay_contract, - BlockInfo::default(), - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("expected to apply contract successfully"); - - let query_value = 
json!({ - "where": [ - ["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], - ["toUserId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], - ], - }); + let query_value = json!({ + "where": [ + ["firstName", "==", "Chris"] + ] + }); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &dashpay_contract, - dashpay_contract - .document_type_for_name("contactRequest") - .expect("should have contact document type"), - &query_cbor, - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - assert_eq!(results.len(), 0); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - // using non existing document in startAt + assert_eq!(results.len(), 0); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("5A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178")]], - ], - "orderBy": [["$id", "asc"]], - }); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - 
.expect("expected to serialize to cbor"); + // A query getting a middle name - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); + let query_value = json!({ + "where": [ + ["middleName", "==", "Briggs"] + ] + }); - let (results, _, _) = drive - .query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - assert_eq!(results.len(), 1); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - // using non existing document in startAt + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "startAt": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), - "orderBy": [["$id", "asc"]], - }); + assert_eq!(results.len(), 1); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let person_document_type = contract - 
.document_type_for_name("person") - .expect("contract should have a person document type"); + // A query getting all people who's first name is before Chris - let result = drive.query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ); - - assert!( - matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAt document not found") - ); - - // using non existing document in startAfter - - let query_value = json!({ - "where": [ - ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], - ], - "startAfter": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), - "orderBy": [["$id", "asc"]], - }); - - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - - let result = drive.query_documents_cbor_from_contract( - &contract, - person_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ); - - assert!( - matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAfter document not found") - ); - - // validate eventual root hash - - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - assert_eq!( - root_hash.as_slice(), - vec![ - 187, 202, 114, 108, 228, 21, 246, 191, 11, 30, 112, 178, 38, 36, 145, 109, 238, 11, - 210, 210, 0, 227, 175, 151, 149, 166, 143, 15, 144, 255, 82, 229, - ], - ); -} - -#[cfg(feature = "server")] -#[test] -fn test_family_person_update() { - let platform_version = 
PlatformVersion::latest(); - let (drive, contract) = setup_family_tests(10, 73509, platform_version); - - let epoch_change_fee_version_test: Lazy = - Lazy::new(|| BTreeMap::from([(0, FeeVersion::first())])); - - let db_transaction = drive.grove.start_transaction(); - - let mut rng = rand::rngs::StdRng::seed_from_u64(84594); - let id_bytes = bs58::decode("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD") - .into_vec() - .expect("this should decode"); - - let owner_id_bytes = bs58::decode("BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj") - .into_vec() - .expect("this should decode"); - - let fixed_person = Person { - id: id_bytes.clone(), - owner_id: owner_id_bytes.clone(), - first_name: String::from("Wisdom"), - middle_name: String::from("Madman"), - last_name: String::from("Ogwu"), - age: rng.gen_range(0..85), - }; - let serialized_person = serde_json::to_value(fixed_person).expect("serialized person"); - let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); - - let document_type = contract - .document_type_for_name("person") - .expect("expected to get document type"); - - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document, storage_flags.clone())), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); - - let updated_fixed_person = Person { - id: id_bytes, - owner_id: owner_id_bytes, - first_name: String::from("Wisdom"), - middle_name: String::from("Madabuchukwu"), - last_name: String::from("Ogwu"), - age: rng.gen_range(0..85), 
- }; - let serialized_person = serde_json::to_value(updated_fixed_person).expect("serialized person"); - let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) - .expect("expected to serialize to cbor"); - let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); - - let fee = drive - .update_document_for_contract( - &document, + let query_value = json!({ + "where": [ + ["firstName", "<", "Chris"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), &contract, - document_type, - None, - BlockInfo::genesis(), - true, - storage_flags, - Some(&db_transaction), - platform_version, - Some(&epoch_change_fee_version_test), + person_document_type, + &drive.config, ) - .expect("expected to override document"); - assert!(fee.storage_fee > 0); - - let query_value = json!({ - "where": [ - ["firstName", "==", "Wisdom"] - ], - "limit": 1, - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - - assert_eq!(results.len(), 1); - - drive - .commit_transaction(db_transaction, &platform_version.drive) - 
.expect("expected to commit transaction"); - - let (proof, _fee) = query - .clone() - .execute_with_proof(&drive, None, None, platform_version) - .expect("expected proof to be generated"); - - let (_root_hash, documents) = query - .verify_proof(&proof, platform_version) - .expect("expected to verify proof"); - - assert_eq!(documents.len(), 1); -} - -#[cfg(feature = "server")] -#[test] -fn test_family_starts_at_queries() { - let platform_version = PlatformVersion::latest(); - let (drive, contract) = setup_family_tests(10, 73509, platform_version); - - let db_transaction = drive.grove.start_transaction(); - - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 63, 90, 74, 129, 70, 204, 232, 67, 190, 85, 133, 79, 254, 245, 203, 180, 77, 67, 94, 22, - 180, 99, 51, 251, 82, 117, 211, 14, 136, 51, 228, 177, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // let all_names = [ - // "Adey".to_string(), - // "Briney".to_string(), - // "Cammi".to_string(), - // "Celinda".to_string(), - // "Dalia".to_string(), - // "Gilligan".to_string(), - // "Kevina".to_string(), - // "Meta".to_string(), - // "Noellyn".to_string(), - // "Prissie".to_string(), - // ]; - - let kevina_encoded_id = "B4zLoYmSGz5SyD7QjAvcjAWtzGCfnQDCti3o7V2ZBDNo".to_string(); - - let query_value = json!({ - "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] - ], - "startAt": kevina_encoded_id, //Kevina - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - 
&drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - - let reduced_names_after: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_reduced_names = [ - "Kevina".to_string(), - "Meta".to_string(), - "Noellyn".to_string(), - ]; - - assert_eq!(reduced_names_after, expected_reduced_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // Now lets try startsAfter - - let query_value = json!({ - "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] - ], - "startAfter": kevina_encoded_id, //Kevina - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - - let reduced_names_after: Vec = results - .iter() - .map(|result| { - let document 
= - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); - - let expected_reduced_names = ["Meta".to_string(), "Noellyn".to_string()]; - - assert_eq!(reduced_names_after, expected_reduced_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let query_value = json!({ - "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] - ], - "startAt": kevina_encoded_id, //Kevina - "limit": 100, - "orderBy": [ - ["firstName", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - - let reduced_names_after: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a 
string"); - String::from(first_name) - }) - .collect(); - - let expected_reduced_names = [ - "Kevina".to_string(), - "Gilligan".to_string(), - "Dalia".to_string(), - ]; - - assert_eq!(reduced_names_after, expected_reduced_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // Now lets try startsAfter - - let query_value = json!({ - "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] - ], - "startAfter": kevina_encoded_id, //Kevina - "limit": 100, - "orderBy": [ - ["firstName", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, None, platform_version) - .expect("proof should be executed"); - assert_eq!(results.len(), 2); - - let reduced_names_after: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let first_name_value = document - .get("firstName") - .expect("we should be able to get the first name"); - let first_name = first_name_value - .as_text() - .expect("the first name should be a string"); - String::from(first_name) - }) - .collect(); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: 
Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let expected_reduced_names = ["Gilligan".to_string(), "Dalia".to_string()]; + let expected_names_before_chris = [ + "Adey".to_string(), + "Briney".to_string(), + "Cammi".to_string(), + "Celinda".to_string(), + ]; + assert_eq!(names, expected_names_before_chris); - assert_eq!(reduced_names_after, expected_reduced_names); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + // A query getting all people who's first name starts with C -#[cfg(feature = "server")] -#[test] -fn test_family_sql_query() { - let platform_version = PlatformVersion::latest(); - // These helpers confirm that sql statements produce the same drive query - // as their json counterparts, helpers above confirm that the json queries - // produce the correct result set - let (drive, contract) = setup_family_tests(10, 73509, platform_version); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - - // Empty where clause - let query_cbor = cbor_serializer::serializable_value_to_cbor( - &json!({ - "where": [], + let query_value = 
json!({ + "where": [ + ["firstName", "StartsWith", "C"] + ], "limit": 100, "orderBy": [ ["firstName", "asc"] ] - }), - None, - ) - .expect("expected to serialize to cbor"); - let query1 = DriveDocumentQuery::from_cbor( - query_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("should build query"); + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let sql_string = "select * from person order by firstName asc limit 100"; - let query2 = - DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) - .expect("should build query"); + let expected_names_starting_with_c = ["Cammi".to_string(), "Celinda".to_string()]; + assert_eq!(names, expected_names_starting_with_c); - assert_eq!(query1, query2); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, 
proof_results); - // Equality clause - let query_cbor = cbor_serializer::serializable_value_to_cbor( - &json!({ + // A query getting all people who's first name starts with C, but limit to 1 and be descending + + let query_value = json!({ "where": [ - ["firstName", "==", "Chris"] + ["firstName", "StartsWith", "C"] + ], + "limit": 1, + "orderBy": [ + ["firstName", "desc"] ] - }), - None, - ) - .expect("expected to serialize to cbor"); - let query1 = DriveDocumentQuery::from_cbor( - query_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("should build query"); + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); + + let expected_names_starting_with_c_desc_1 = ["Celinda".to_string()]; + assert_eq!(names, expected_names_starting_with_c_desc_1); - let sql_string = "select * from person where firstName = 'Chris'"; - let query2 = - DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) - .expect("should build query"); + let (proof_root_hash, 
proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - assert_eq!(query1, query2); + // A query getting all people who's first name is between Chris and Noellyn included - // Less than - let query_cbor = cbor_serializer::serializable_value_to_cbor( - &json!({ + let query_value = json!({ "where": [ - ["firstName", "<", "Chris"] + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] ], "limit": 100, "orderBy": [ ["firstName", "asc"] ] - }), - None, - ) - .expect("expected to serialize to cbor"); - let query1 = DriveDocumentQuery::from_cbor( - query_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("should build query"); + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + assert_eq!(results.len(), 5); + + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); + + let expected_between_names = [ + "Dalia".to_string(), + "Gilligan".to_string(), + 
"Kevina".to_string(), + "Meta".to_string(), + "Noellyn".to_string(), + ]; - let sql_string = - "select * from person where firstName < 'Chris' order by firstName asc limit 100"; - let query2 = - DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) - .expect("should build query"); + assert_eq!(names, expected_between_names); - assert_eq!(query1, query2); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - // Starts with - let query_cbor = cbor_serializer::serializable_value_to_cbor( - &json!({ + // A query getting back elements having specific names + + let query_value = json!({ "where": [ - ["firstName", "StartsWith", "C"] + ["firstName", "in", names] ], "limit": 100, "orderBy": [ ["firstName", "asc"] ] - }), - None, - ) - .expect("expected to serialize to cbor"); - let query1 = DriveDocumentQuery::from_cbor( - query_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("should build query"); + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + 
.get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let sql_string = - "select * from person where firstName like 'C%' order by firstName asc limit 100"; - let query2 = - DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) - .expect("should build query"); + assert_eq!(names, expected_between_names); - assert_eq!(query1, query2); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - // Range combination - let query_cbor = cbor_serializer::serializable_value_to_cbor( - &json!({ + let query_value = json!({ "where": [ - ["firstName", ">", "Chris"], - ["firstName", "<=", "Noellyn"] + ["firstName", "in", names] ], "limit": 100, "orderBy": [ - ["firstName", "asc"] + ["firstName", "desc"] ] - }), - None, - ) - .expect("expected to serialize to cbor"); - let query1 = DriveDocumentQuery::from_cbor( - query_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("should build query"); + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), 
person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); + + let expected_reversed_between_names = [ + "Noellyn".to_string(), + "Meta".to_string(), + "Kevina".to_string(), + "Gilligan".to_string(), + "Dalia".to_string(), + ]; - let sql_string = "select * from person where firstName > 'Chris' and firstName <= 'Noellyn' order by firstName asc limit 100"; - let query2 = - DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) - .expect("should build query"); + assert_eq!(names, expected_reversed_between_names); - assert_eq!(query1, query2); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - // In clause - let names = vec![String::from("a"), String::from("b")]; - let query_cbor = cbor_serializer::serializable_value_to_cbor( - &json!({ + // A query getting back elements having specific names and over a certain age + + let query_value = json!({ "where": [ - ["firstName", "in", names] + ["firstName", "in", names], + ["age", ">=", 45] ], "limit": 100, "orderBy": [ - ["firstName", "asc"] - ], - }), - None, - ) - .expect("expected to serialize to cbor"); - let query1 = DriveDocumentQuery::from_cbor( - query_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("should build query"); + ["firstName", "asc"], + ["age", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + 
.document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let sql_string = - "select * from person where firstName in ('a', 'b') order by firstName limit 100"; - let query2 = - DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) - .expect("should build query"); + let expected_names_45_over = [ + "Dalia".to_string(), + "Gilligan".to_string(), + "Kevina".to_string(), + "Meta".to_string(), + ]; - assert_eq!(query1, query2); -} + assert_eq!(names, expected_names_45_over); -#[cfg(feature = "server")] -#[test] -fn test_family_with_nulls_query() { - let (drive, contract) = setup_family_tests_with_nulls(10, 30004); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let platform_version = PlatformVersion::latest(); + // A query getting back elements having specific names and over a certain age - let epoch_change_fee_version_test: Lazy = - Lazy::new(|| BTreeMap::from([(0, FeeVersion::first())])); + let query_value = json!({ + "where": [ + ["firstName", 
"in", names], + ["age", ">", 48] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"], + ["age", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); + + // Kevina is 48 so she should be now excluded, Dalia is 68, Gilligan is 49 and Meta is 59 + + let expected_names_over_48 = [ + "Dalia".to_string(), + "Gilligan".to_string(), + "Meta".to_string(), + ]; + + assert_eq!(names, expected_names_over_48); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + let ages: HashMap = results + .into_iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let name = 
name_value + .as_text() + .expect("the first name should be a string") + .to_string(); + let age_value = document + .get("age") + .expect("we should be able to get the age"); + let age: u8 = age_value.to_integer().expect("expected u8 value"); + (name, age) + }) + .collect(); + + let meta_age = ages + .get("Meta") + .expect("we should be able to get Kevina as she is 48"); + + assert_eq!(*meta_age, 59); + + // fetching by $id + let mut rng = rand::rngs::StdRng::seed_from_u64(84594); + let id_bytes = bs58::decode("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD") + .into_vec() + .expect("this should decode"); + + let owner_id_bytes = bs58::decode("BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj") + .into_vec() + .expect("this should decode"); + + let fixed_person = Person { + id: id_bytes, + owner_id: owner_id_bytes, + first_name: String::from("Wisdom"), + middle_name: String::from("Madabuchukwu"), + last_name: String::from("Ogwu"), + age: rng.gen_range(0..85), + }; + let serialized_person = serde_json::to_value(fixed_person).expect("serialized person"); + let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) + .expect("expected to serialize to cbor"); + let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); - let db_transaction = drive.grove.start_transaction(); + let document_type = contract + .document_type_for_name("person") + .expect("expected to get document type"); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 144, 185, 8, 30, 191, 97, 149, 182, 117, 203, 98, 187, 156, 93, 168, 171, 134, 112, 221, - 230, 249, 131, 86, 1, 26, 92, 242, 25, 251, 187, 192, 182, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - let all_names = [ - "".to_string(), - "".to_string(), - "".to_string(), - 
"".to_string(), - "".to_string(), - "".to_string(), - "Alexia".to_string(), - "Gerti".to_string(), - "Latisha".to_string(), - "Norry".to_string(), - ]; - - // A query getting all elements by firstName - - let query_value = json!({ - "where": [ - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let person_document_type = contract - .document_type_for_name("person") - .expect("contract should have a person document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - person_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .clone() - .into_iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document - .get("firstName") - .map(|value| { - let first_name_value = value - .as_text() - .expect("the normalized label should be a string"); - String::from(first_name_value) - }) - .unwrap_or_default() - }) - .collect(); - - assert_eq!(names, all_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let ids: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), person_document_type, platform_version) - .expect("we should be able to deserialize the document"); - base64::engine::general_purpose::STANDARD.encode(document.id().as_slice()) - }) - .collect(); + let storage_flags = 
Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - for i in 0..10 { drive - .delete_document_for_contract( - base64::engine::general_purpose::STANDARD - .decode(ids.get(i).unwrap()) - .expect("expected to decode from base64") - .try_into() - .expect("expected to get 32 bytes"), - &contract, - "person", + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, BlockInfo::genesis(), true, Some(&db_transaction), platform_version, - Some(&epoch_change_fee_version_test), + None, ) - .expect("expected to be able to delete the document"); - } + .expect("document should be inserted"); - drive - .grove - .commit_transaction(db_transaction) - .unwrap() - .expect("unable to commit transaction"); -} + let id_two_bytes = bs58::decode("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179") + .into_vec() + .expect("should decode"); + let owner_id_bytes = bs58::decode("Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA") + .into_vec() + .expect("this should decode"); + let next_person = Person { + id: id_two_bytes, + owner_id: owner_id_bytes, + first_name: String::from("Wdskdfslgjfdlj"), + middle_name: String::from("Mdsfdsgsdl"), + last_name: String::from("dkfjghfdk"), + age: rng.gen_range(0..85), + }; + let serialized_person = serde_json::to_value(next_person).expect("serialized person"); + let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) + .expect("expected to serialize to cbor"); + let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); -#[cfg(feature = "server")] -#[test] -fn test_query_with_cached_contract() { - let platform_version = PlatformVersion::latest(); - let (drive, contract) = setup_family_tests(10, 73509, platform_version); + let document_type = contract + 
.document_type_for_name("person") + .expect("expected to get document type"); - let db_transaction = drive.grove.start_transaction(); + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); - // Make sure the state is deterministic - let expected_app_hash = vec![ - 63, 90, 74, 129, 70, 204, 232, 67, 190, 85, 133, 79, 254, 245, 203, 180, 77, 67, 94, 22, - 180, 99, 51, 251, 82, 117, 211, 14, 136, 51, 228, 177, - ]; + let query_value = json!({ + "where": [ + ["$id", "in", vec![String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + }); - assert_eq!(root_hash.as_slice(), expected_app_hash); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - // Make sure contract is not cached - let contract_ref = drive - .get_cached_contract_with_fetch_info( - *contract.id_ref().as_bytes(), - Some(&db_transaction), - &platform_version.drive, - ) - .expect("should return a contract ref"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - assert!(contract_ref.is_none()); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + + assert_eq!(results.len(), 1); + + // TODO: Add test 
for proofs after transaction + // drive.grove.commit_transaction(db_transaction).expect("unable to commit transaction"); + // let (proof_root_hash, proof_results) = drive + // .query_documents_from_contract_as_grove_proof_only_get_elements( + // &contract, + // person_document_type, + // query_cbor.as_slice(), + // None, + // ) + // .expect("query should be executed"); + // assert_eq!(root_hash, proof_root_hash); + // assert_eq!(results, proof_results); + // let db_transaction = drive.grove.start_transaction(); + + // fetching by $id with order by + + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "orderBy": [["$id", "asc"]], + }); - // A query getting all elements by firstName + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let query_value = json!({ - "where": [ - ], - "limit": 100, - "orderBy": [ - ["firstName", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let QuerySerializedDocumentsOutcome { items, .. 
} = drive - .query_documents_cbor_with_document_type_lookup( - where_cbor.as_slice(), - *contract.id_ref().as_bytes(), - "person", - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - assert_eq!(items.len(), 10); + assert_eq!(results.len(), 2); - // Cache was populated and there only two ref two the cached fetched info (here and cache) - let contract_ref = drive - .get_cached_contract_with_fetch_info( - *contract.id_ref().as_bytes(), - Some(&db_transaction), - &platform_version.drive, + let last_person = Document::from_bytes( + results.first().unwrap().as_slice(), + document_type, + platform_version, ) - .expect("should return a contract ref") - .expect("expected a reference counter to the contract"); + .expect("we should be able to deserialize the document"); - assert_eq!(Arc::strong_count(&contract_ref), 2); -} - -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_contract_verification() { - let platform_version = PlatformVersion::latest(); - let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + assert_eq!( + last_person.id().to_vec(), + vec![ + 76, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, + 189, 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, + ] + ); - let root_hash = drive - .grove - .root_hash(None, &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); + // fetching by $id with order by desc - let contract_proof = drive - .prove_contract(contract.id().into_buffer(), None, platform_version) - .expect("expected to get proof"); - let (proof_root_hash, proof_returned_contract) = Drive::verify_contract( - 
contract_proof.as_slice(), - None, - false, - false, - contract.id().into_buffer(), - platform_version, - ) - .expect("expected to get contract from proof"); + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "orderBy": [["$id", "desc"]], + }); - assert_eq!(root_hash, proof_root_hash); - assert_eq!( - contract, - proof_returned_contract.expect("expected to get a contract") - ); -} + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); -#[test] -fn test_contract_keeps_history_fetch_and_verification() { - let (drive, contract) = setup_references_tests(10, 3334); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let platform_version = PlatformVersion::latest(); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let root_hash = drive - .grove - .root_hash(None, &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); + assert_eq!(results.len(), 2); - drive - .fetch_contract( - contract.id().to_buffer(), - None, - None, - None, + let last_person = Document::from_bytes( + results.first().unwrap().as_slice(), + document_type, platform_version, ) - .unwrap() - .expect("expected to be able to fetch a contract") - .expect("expected a contract to be present"); - - let contract_proof = drive - .prove_contract(contract.id().into_buffer(), None, platform_version) - .expect("expected to get proof"); - let (proof_root_hash, proof_returned_contract) = Drive::verify_contract( - contract_proof.as_slice(), - None, - false, - false, - contract.id().into_buffer(), 
- platform_version, - ) - .expect("expected to get contract from proof"); + .expect("we should be able to deserialize the document"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!( - contract, - proof_returned_contract.expect("expected to get a contract") - ); -} + assert_eq!( + last_person.id().to_vec(), + vec![ + 140, 161, 17, 201, 152, 232, 129, 48, 168, 13, 49, 10, 218, 53, 118, 136, 165, 198, + 189, 116, 116, 22, 133, 92, 104, 165, 186, 249, 94, 81, 45, 20, + ] + ); -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_first_version() { - let platform_version = PlatformVersion::first(); - let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, &platform_version); + // + // // fetching with empty where and orderBy + // + let query_value = json!({}); - let db_transaction = drive.grove.start_transaction(); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 142, 246, 25, 166, 52, 184, 158, 102, 192, 111, 173, 255, 155, 125, 53, 233, 98, 241, 201, - 233, 2, 58, 47, 90, 209, 207, 147, 204, 83, 68, 183, 143, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - let all_names = [ - "amalle".to_string(), - "anna-diane".to_string(), - "atalanta".to_string(), - "eden".to_string(), - "laureen".to_string(), - "leone".to_string(), - "marilyn".to_string(), - "minna".to_string(), - "mora".to_string(), - "phillie".to_string(), - ]; - - // A query getting all elements by firstName - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "limit": 100, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - 
let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); - - assert_eq!(names, all_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting all elements starting with a in dash parent domain - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"], - ["normalizedLabel", "startsWith", "a"] - ], - "limit": 5, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - 
.execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); - - let a_names = [ - "amalle".to_string(), - "anna-diane".to_string(), - "atalanta".to_string(), - ]; - - assert_eq!(names, a_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let ids: Vec = results - .into_iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - hex::encode(document.id().as_slice()) - }) - .collect(); - - let a_ids = [ - "61978359176813a3e9b79c07df8addda2aea3841cfff2afe5b23cf1b5b926c1b".to_string(), - "0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080".to_string(), - "26a9344b6d0fcf8f525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a".to_string(), - ]; - - assert_eq!(ids, a_ids); - - // A query getting one element starting with a in dash parent domain asc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"], - ["normalizedLabel", "startsWith", "a"] - ], - 
"startAt": encoded_start_at, - "limit": 1, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); - - let a_names = ["anna-diane".to_string()]; - - assert_eq!(names, a_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting one element starting with a in dash parent domain desc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"], - ["normalizedLabel", "startsWith", "a"] - ], - "startAt": encoded_start_at, - "limit": 1, - "orderBy": [ - 
["normalizedLabel", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); - - let a_names = ["anna-diane".to_string()]; - - assert_eq!(names, a_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - let record_id_base68: Vec = results - .into_iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - - let records_value = document - .get("records") - .expect("we should be able to get the records"); - let map_records_value = records_value.as_map().expect("this should be a map"); - let record_dash_unique_identity_id = - Value::inner_optional_bytes_value(map_records_value, "dashUniqueIdentityId") - .unwrap() - 
.expect("there should be a dashUniqueIdentityId"); - bs58::encode(record_dash_unique_identity_id).into_string() - }) - .collect(); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let a_record_id_base58 = ["5hXRj1xmmnNQ7RN1ATYym4x6bQugxcKn7FWiMnkQTQpF".to_string()]; + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - assert_eq!(record_id_base68, a_record_id_base58); + assert_eq!(results.len(), 12); - // A query getting elements by the identity desc + // + // // fetching with empty where and orderBy $id desc + // + let query_value = json!({ + "orderBy": [["$id", "desc"]] + }); - let query_value = json!({ - "where": [ - ["records.dashUniqueIdentityId", "<=", "5hXRj1xmmnNQ7RN1ATYym4x6bQugxcKn7FWiMnkQTQpF"], - ], - "limit": 10, - "orderBy": [ - ["records.dashUniqueIdentityId", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); 
- let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); - - let a_names = [ - "anna-diane".to_string(), - "marilyn".to_string(), - "minna".to_string(), - ]; - - assert_eq!(names, a_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting 2 elements asc by the identity - - let query_value = json!({ - "where": [ - ["records.dashUniqueIdentityId", "<=", "5hXRj1xmmnNQ7RN1ATYym4x6bQugxcKn7FWiMnkQTQpF"], - ], - "limit": 2, - "orderBy": [ - ["records.dashUniqueIdentityId", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + 
.expect("expected to serialize to cbor"); - let a_names = ["minna".to_string(), "marilyn".to_string()]; + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - assert_eq!(names, a_names); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); + assert_eq!(results.len(), 12); - // A query getting all elements + let last_person = Document::from_bytes( + results.first().unwrap().as_slice(), + document_type, + platform_version, + ) + .expect("we should be able to deserialize the document"); - let query_value = json!({ - "orderBy": [ - ["records.dashUniqueIdentityId", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - - assert_eq!(results.len(), 10); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + assert_eq!( + 
last_person.id().to_vec(), + vec![ + 249, 170, 70, 122, 181, 31, 35, 176, 175, 131, 70, 150, 250, 223, 194, 203, 175, + 200, 107, 252, 199, 227, 154, 105, 89, 57, 38, 85, 236, 192, 254, 88, + ] + ); -#[cfg(feature = "server")] -#[test] -fn test_dpns_insertion_no_aliases() { - // using ascending order with rangeTo operators - let (drive, contract) = - setup_dpns_test_with_data("tests/supporting_files/contract/dpns/domains-no-alias.json"); + // + // // fetching with ownerId in a set of values + // + let query_value = json!({ + "where": [ + ["$ownerId", "in", ["BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj", "Di8dtJXv3L2YnzDNUN4w5rWLPSsSAzv6hLMMQbg3eyVA"]] + ], + "orderBy": [["$ownerId", "desc"]] + }); - let platform_version = PlatformVersion::latest(); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let db_transaction = drive.grove.start_transaction(); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let query_value = json!({ - "orderBy": [["records.dashUniqueIdentityId", "desc"]], - }); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + assert_eq!(results.len(), 2); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); + // + // // fetching with ownerId equal and orderBy + // + let query_value = json!({ + "where": [ + ["$ownerId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"] + ], + "orderBy": [["$ownerId", "asc"]] + }); - let result = drive - .query_documents_cbor_from_contract( - &contract, - 
domain_document_type, - query_cbor.as_slice(), - None, - Some(&db_transaction), - Some(platform_version.protocol_version), - ) - .expect("should perform query"); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - assert_eq!(result.0.len(), 15); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - domain_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + + assert_eq!(results.len(), 1); + + // query empty contract with nested path queries + + let dashpay_contract = json_document_to_contract( + "tests/supporting_files/contract/dashpay/dashpay-contract.json", + false, + platform_version, ) - .expect("query should be executed"); - assert_eq!( + .expect("expected to get cbor document"); + drive - .grove - .root_hash(None, &platform_version.drive.grove_version) - .unwrap() - .expect("should get root hash"), - proof_root_hash - ); - assert_eq!(result.0, proof_results); -} + .apply_contract( + &dashpay_contract, + BlockInfo::default(), + true, + StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("expected to apply contract successfully"); -#[cfg(feature = "server")] -#[test] -fn test_dpns_insertion_with_aliases() { - // using ascending order with rangeTo operators - let (drive, contract) = - setup_dpns_test_with_data("tests/supporting_files/contract/dpns/domains.json"); + let query_value = json!({ + "where": [ + ["$ownerId", "==", 
"BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], + ["toUserId", "==", "BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj"], + ], + }); - let platform_version = PlatformVersion::latest(); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let db_transaction = drive.grove.start_transaction(); + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &dashpay_contract, + dashpay_contract + .document_type_for_name("contactRequest") + .expect("should have contact document type"), + &query_cbor, + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); - let query_value = json!({ - "orderBy": [["records.dashUniqueIdentityId", "desc"]], - }); + assert_eq!(results.len(), 0); - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); + // using non existing document in startAt - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("5A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178")]], + ], + "orderBy": [["$id", "asc"]], + }); + + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + + let (results, _, _) = drive + .query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + + assert_eq!(results.len(), 1); - let result = drive - .query_documents_cbor_from_contract( + // using non existing document in 
startAt + + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "startAt": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), + "orderBy": [["$id", "asc"]], + }); + + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + + let result = drive.query_documents_cbor_from_contract( &contract, - domain_document_type, + person_document_type, query_cbor.as_slice(), None, Some(&db_transaction), Some(platform_version.protocol_version), - ) - .expect("should perform query"); + ); - assert_eq!(result.0.len(), 24); + assert!( + matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAt document not found") + ); - let (proof_root_hash, proof_results, _) = drive - .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( - &contract, - domain_document_type, - query_cbor.as_slice(), - None, - None, - Some(platform_version.protocol_version), - ) - .expect("query should be executed"); - assert_eq!( - drive - .grove - .root_hash(None, &platform_version.drive.grove_version) - .unwrap() - .expect("should get root hash"), - proof_root_hash - ); - assert_eq!(result.0, proof_results); -} + // using non existing document in startAfter -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_at_first_version() { - let platform_version = PlatformVersion::first(); - // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. 
- let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + let query_value = json!({ + "where": [ + ["$id", "in", [String::from("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD"), String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF179")]], + ], + "startAfter": String::from("6A8SGgdmj2NtWCYoYDPDpbsYkq2MCbgi6Lx4ALLfF178"), + "orderBy": [["$id", "asc"]], + }); - let platform_version = PlatformVersion::latest(); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - let db_transaction = drive.grove.start_transaction(); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 142, 246, 25, 166, 52, 184, 158, 102, 192, 111, 173, 255, 155, 125, 53, 233, 98, 241, 201, - 233, 2, 58, 47, 90, 209, 207, 147, 204, 83, 68, 183, 143, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash,); - - // let all_names = [ - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query getting one element starting with a in dash parent domain asc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAt": encoded_start_at, - "limit": 1, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = 
cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); + let result = drive.query_documents_cbor_from_contract( + &contract, + person_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ); - let a_names = ["anna-diane".to_string()]; + assert!( + matches!(result, Err(Error::Query(QuerySyntaxError::StartDocumentNotFound(message))) if message == "startAfter document not found") + ); - assert_eq!(names, a_names); + // validate eventual root hash - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); -#[cfg(feature = "server")] -#[test] -fn 
test_dpns_query_start_at_latest_version() { - let platform_version = PlatformVersion::latest(); - // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. - let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + assert_eq!( + root_hash.as_slice(), + vec![ + 5, 92, 86, 251, 178, 238, 8, 246, 80, 139, 148, 81, 135, 108, 57, 197, 114, 102, + 219, 71, 50, 0, 47, 252, 106, 157, 118, 30, 128, 199, 55, 126, + ], + ); + } - let platform_version = PlatformVersion::latest(); + #[cfg(feature = "server")] + #[test] + fn test_family_person_update() { + let platform_version = PlatformVersion::latest(); + let (drive, contract) = setup_family_tests(10, 73509, platform_version); + + let epoch_change_fee_version_test: Lazy = + Lazy::new(|| BTreeMap::from([(0, FeeVersion::first())])); + + let db_transaction = drive.grove.start_transaction(); + + let mut rng = rand::rngs::StdRng::seed_from_u64(84594); + let id_bytes = bs58::decode("ATxXeP5AvY4aeUFA6WRo7uaBKTBgPQCjTrgtNpCMNVRD") + .into_vec() + .expect("this should decode"); + + let owner_id_bytes = bs58::decode("BYR3zJgXDuz1BYAkEagwSjVqTcE1gbqEojd6RwAGuMzj") + .into_vec() + .expect("this should decode"); + + let fixed_person = Person { + id: id_bytes.clone(), + owner_id: owner_id_bytes.clone(), + first_name: String::from("Wisdom"), + middle_name: String::from("Madman"), + last_name: String::from("Ogwu"), + age: rng.gen_range(0..85), + }; + let serialized_person = serde_json::to_value(fixed_person).expect("serialized person"); + let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) + .expect("expected to serialize to cbor"); + let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); - let db_transaction = drive.grove.start_transaction(); + let document_type = contract + .document_type_for_name("person") + .expect("expected 
to get document type"); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 248, 74, 104, 110, 129, 228, 194, 1, 4, 239, 134, 54, 105, 172, 221, 43, 101, 133, 235, - 146, 182, 153, 212, 118, 189, 99, 227, 14, 94, 83, 17, 98, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash,); - - // let all_names = [ - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query getting one element starting with a in dash parent domain asc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAt": encoded_start_at, - "limit": 1, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); 
- let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - let a_names = ["anna-diane".to_string()]; + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document, storage_flags.clone())), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); - assert_eq!(names, a_names); + let updated_fixed_person = Person { + id: id_bytes, + owner_id: owner_id_bytes, + first_name: String::from("Wisdom"), + middle_name: String::from("Madabuchukwu"), + last_name: String::from("Ogwu"), + age: rng.gen_range(0..85), + }; + let serialized_person = + serde_json::to_value(updated_fixed_person).expect("serialized person"); + let person_cbor = cbor_serializer::serializable_value_to_cbor(&serialized_person, Some(0)) + .expect("expected to serialize to cbor"); + let document = Document::from_cbor(person_cbor.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + let fee = drive + .update_document_for_contract( + &document, + &contract, + document_type, + None, + BlockInfo::genesis(), + true, + storage_flags, + Some(&db_transaction), + platform_version, + Some(&epoch_change_fee_version_test), + ) + .expect("expected to override document"); + assert!(fee.storage_fee > 
0); -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_after() { - let platform_version = PlatformVersion::latest(); - // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. - let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + let query_value = json!({ + "where": [ + ["firstName", "==", "Wisdom"] + ], + "limit": 1, + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); - let platform_version = PlatformVersion::latest(); + assert_eq!(results.len(), 1); - let db_transaction = drive.grove.start_transaction(); + drive + .commit_transaction(db_transaction, &platform_version.drive) + .expect("expected to commit transaction"); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 248, 74, 104, 110, 129, 228, 194, 1, 4, 239, 134, 54, 105, 172, 221, 43, 101, 133, 235, - 146, 182, 153, 212, 118, 189, 99, 227, 14, 94, 83, 17, 98, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // let all_names = [ - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query 
getting one element starting with a in dash parent domain asc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAfter": encoded_start_at, - "limit": 2, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); + let (proof, _fee) = query + .clone() + .execute_with_proof(&drive, None, None, platform_version) + .expect("expected proof to be generated"); - let a_names = ["atalanta".to_string(), "eden".to_string()]; + let (_root_hash, documents) = query + .verify_proof(&proof, platform_version) + .expect("expected to verify proof"); - assert_eq!(names, a_names); + assert_eq!(documents.len(), 1); + } - let (proof_root_hash, proof_results, _) = query - 
.execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + #[cfg(feature = "server")] + #[test] + fn test_family_starts_at_queries() { + let platform_version = PlatformVersion::latest(); + let (drive, contract) = setup_family_tests(10, 73509, platform_version); -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_at_desc() { - let platform_version = PlatformVersion::latest(); - // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. - let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + let db_transaction = drive.grove.start_transaction(); - let platform_version = PlatformVersion::latest(); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 59, 253, 119, 177, 148, 100, 153, 121, 228, 238, 250, 185, 103, 53, 113, 8, 30, 192, + 75, 150, 153, 2, 24, 109, 93, 91, 97, 75, 106, 35, 29, 252, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // let all_names = [ + // "Adey".to_string(), + // "Briney".to_string(), + // "Cammi".to_string(), + // "Celinda".to_string(), + // "Dalia".to_string(), + // "Gilligan".to_string(), + // "Kevina".to_string(), + // "Meta".to_string(), + // "Noellyn".to_string(), + // "Prissie".to_string(), + // ]; + + let kevina_encoded_id = "B4zLoYmSGz5SyD7QjAvcjAWtzGCfnQDCti3o7V2ZBDNo".to_string(); + + let query_value = json!({ + "where": [ + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] + ], + "startAt": kevina_encoded_id, //Kevina + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let 
person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + + let reduced_names_after: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let db_transaction = drive.grove.start_transaction(); + let expected_reduced_names = [ + "Kevina".to_string(), + "Meta".to_string(), + "Noellyn".to_string(), + ]; - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 248, 74, 104, 110, 129, 228, 194, 1, 4, 239, 134, 54, 105, 172, 221, 43, 101, 133, 235, - 146, 182, 153, 212, 118, 189, 99, 227, 14, 94, 83, 17, 98, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // let all_names = [ - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query getting one element starting with a in dash parent domain asc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode 
id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAt": encoded_start_at, - "limit": 2, - "orderBy": [ - ["normalizedLabel", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); + assert_eq!(reduced_names_after, expected_reduced_names); - let a_names = ["anna-diane".to_string(), "amalle".to_string()]; + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - assert_eq!(names, a_names); + // Now lets try startsAfter - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - 
assert_eq!(results, proof_results); -} + let query_value = json!({ + "where": [ + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] + ], + "startAfter": kevina_encoded_id, //Kevina + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + + let reduced_names_after: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_after_desc() { - let platform_version = PlatformVersion::latest(); - // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. 
- let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + let expected_reduced_names = ["Meta".to_string(), "Noellyn".to_string()]; - let platform_version = PlatformVersion::latest(); + assert_eq!(reduced_names_after, expected_reduced_names); - let db_transaction = drive.grove.start_transaction(); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 248, 74, 104, 110, 129, 228, 194, 1, 4, 239, 134, 54, 105, 172, 221, 43, 101, 133, 235, - 146, 182, 153, 212, 118, 189, 99, 227, 14, 94, 83, 17, 98, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // let all_names = [ - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query getting one element starting with a in dash parent domain asc - - let anna_id = hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAfter": encoded_start_at, - "limit": 2, - "orderBy": [ - ["normalizedLabel", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should 
have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .expect("we should be able to get the first name"); - let normalized_label = normalized_label_value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .collect(); + let query_value = json!({ + "where": [ + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] + ], + "startAt": kevina_encoded_id, //Kevina + "limit": 100, + "orderBy": [ + ["firstName", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + + let reduced_names_after: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = 
first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let a_names = ["amalle".to_string()]; + let expected_reduced_names = [ + "Kevina".to_string(), + "Gilligan".to_string(), + "Dalia".to_string(), + ]; - assert_eq!(names, a_names); + assert_eq!(reduced_names_after, expected_reduced_names); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_at_with_null_id() { - // The point of this test is to test the situation where we have a start at inside an index with a null value - // While dpns doesn't really support this, other contracts might allow null values. - // We are just using the DPNS contract because it is handy. 
- let (drive, contract) = setup_dpns_tests_label_not_required(10, 11456); + // Now lets try startsAfter - let platform_version = PlatformVersion::latest(); + let query_value = json!({ + "where": [ + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] + ], + "startAfter": kevina_encoded_id, //Kevina + "limit": 100, + "orderBy": [ + ["firstName", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, None, platform_version) + .expect("proof should be executed"); + assert_eq!(results.len(), 2); + + let reduced_names_after: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let first_name_value = document + .get("firstName") + .expect("we should be able to get the first name"); + let first_name = first_name_value + .as_text() + .expect("the first name should be a string"); + String::from(first_name) + }) + .collect(); - let document_type = contract - .document_type_for_name("domain") - .expect("expected to get document type"); + let expected_reduced_names = ["Gilligan".to_string(), "Dalia".to_string()]; - let db_transaction = drive.grove.start_transaction(); + assert_eq!(reduced_names_after, expected_reduced_names); - let mut rng = rand::rngs::StdRng::seed_from_u64(11456); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + 
assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - let domain0_id = Identifier::random_with_rng(&mut rng); - let domain0 = Domain { - id: domain0_id, - owner_id: Identifier::random_with_rng(&mut rng), - label: None, - normalized_label: None, - normalized_parent_domain_name: "dash".to_string(), - records: Records { - dash_unique_identity_id: Identifier::random_with_rng(&mut rng), - }, - preorder_salt: Bytes32::random_with_rng(&mut rng), - subdomain_rules: SubdomainRules { - allow_subdomains: false, - }, - }; + #[cfg(feature = "server")] + #[test] + fn test_family_sql_query() { + let platform_version = PlatformVersion::latest(); + // These helpers confirm that sql statements produce the same drive query + // as their json counterparts, helpers above confirm that the json queries + // produce the correct result set + let (drive, contract) = setup_family_tests(10, 73509, platform_version); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + + // Empty where clause + let query_cbor = cbor_serializer::serializable_value_to_cbor( + &json!({ + "where": [], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }), + None, + ) + .expect("expected to serialize to cbor"); + let query1 = DriveDocumentQuery::from_cbor( + query_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("should build query"); + + let sql_string = "select * from person order by firstName asc limit 100"; + let query2 = + DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) + .expect("should build query"); + + assert_eq!(query1, query2); + + // Equality clause + let query_cbor = cbor_serializer::serializable_value_to_cbor( + &json!({ + "where": [ + ["firstName", "==", "Chris"] + ] + }), + None, + ) + .expect("expected to serialize to cbor"); + let query1 = DriveDocumentQuery::from_cbor( + query_cbor.as_slice(), + 
&contract, + person_document_type, + &drive.config, + ) + .expect("should build query"); - let value0 = platform_value::to_value(domain0).expect("serialized domain"); - let document0 = Document::from_platform_value(value0, platform_version) - .expect("document should be properly deserialized"); + let sql_string = "select * from person where firstName = 'Chris'"; + let query2 = + DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) + .expect("should build query"); - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + assert_eq!(query1, query2); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document0, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, + // Less than + let query_cbor = cbor_serializer::serializable_value_to_cbor( + &json!({ + "where": [ + ["firstName", "<", "Chris"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }), None, ) - .expect("document should be inserted"); + .expect("expected to serialize to cbor"); + let query1 = DriveDocumentQuery::from_cbor( + query_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("should build query"); - let domain1_id = Identifier::random_with_rng(&mut rng); + let sql_string = + "select * from person where firstName < 'Chris' order by firstName asc limit 100"; + let query2 = + DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) + .expect("should build query"); - let domain1 = Domain { - id: domain1_id, - owner_id: Identifier::random_with_rng(&mut rng), - label: None, - normalized_label: None, - normalized_parent_domain_name: "dash".to_string(), - records: Records { - dash_unique_identity_id: Identifier::random_with_rng(&mut rng), - }, - preorder_salt: 
Bytes32::random_with_rng(&mut rng), - subdomain_rules: SubdomainRules { - allow_subdomains: false, - }, - }; + assert_eq!(query1, query2); - let value1 = serde_json::to_value(domain1).expect("serialized domain"); - let document_cbor1 = cbor_serializer::serializable_value_to_cbor(&value1, Some(0)) + // Starts with + let query_cbor = cbor_serializer::serializable_value_to_cbor( + &json!({ + "where": [ + ["firstName", "StartsWith", "C"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }), + None, + ) .expect("expected to serialize to cbor"); - let document1 = Document::from_cbor(document_cbor1.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); - - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document1, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, + let query1 = DriveDocumentQuery::from_cbor( + query_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("should build query"); + + let sql_string = + "select * from person where firstName like 'C%' order by firstName asc limit 100"; + let query2 = + DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) + .expect("should build query"); + + assert_eq!(query1, query2); + + // Range combination + let query_cbor = cbor_serializer::serializable_value_to_cbor( + &json!({ + "where": [ + ["firstName", ">", "Chris"], + ["firstName", "<=", "Noellyn"] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }), None, ) - .expect("document should be inserted"); + .expect("expected to serialize to cbor"); + let query1 = DriveDocumentQuery::from_cbor( + query_cbor.as_slice(), + &contract, + person_document_type, + 
&drive.config, + ) + .expect("should build query"); - drive - .grove - .commit_transaction(db_transaction) - .unwrap() - .expect("transaction should be committed"); + let sql_string = "select * from person where firstName > 'Chris' and firstName <= 'Noellyn' order by firstName asc limit 100"; + let query2 = + DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) + .expect("should build query"); - let db_transaction = drive.grove.start_transaction(); + assert_eq!(query1, query2); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 11, 67, 98, 193, 214, 56, 66, 244, 193, 131, 252, 190, 5, 52, 29, 96, 160, 27, 222, 78, 91, - 150, 54, 85, 81, 249, 14, 74, 213, 181, 254, 120, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // let all_names = [ - // "".to_string(), x2 - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query getting one element starting with a in dash parent domain asc - - let encoded_start_at = bs58::encode(domain0_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAt": encoded_start_at, - "limit": 3, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + // In clause + let names = vec![String::from("a"), String::from("b")]; + let query_cbor = cbor_serializer::serializable_value_to_cbor( + &json!({ + "where": [ + ["firstName", "in", names] + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ], + }), + None, + ) .expect("expected to serialize to cbor"); - let 
domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document - .get("normalizedLabel") - .map(|value| { - let normalized_label = value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .unwrap_or_default() - }) - .collect(); + let query1 = DriveDocumentQuery::from_cbor( + query_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("should build query"); - let a_names = [ - "".to_string(), - "amalle".to_string(), - "anna-diane".to_string(), - ]; + let sql_string = + "select * from person where firstName in ('a', 'b') order by firstName limit 100"; + let query2 = + DriveDocumentQuery::from_sql_expr(sql_string, &contract, Some(&DriveConfig::default())) + .expect("should build query"); - assert_eq!(names, a_names); + assert_eq!(query1, query2); + } - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + #[cfg(feature = "server")] + #[test] + fn test_family_with_nulls_query() { + let (drive, contract) = setup_family_tests_with_nulls(10, 30004); -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_after_with_null_id() { - // The point of this test is to test the situation where 
we have a start at inside an index with a null value - // While dpns doesn't really support this, other contracts might allow null values. - // We are just using the DPNS contract because it is handy. - let (drive, contract) = setup_dpns_tests_label_not_required(10, 11456); + let platform_version = PlatformVersion::latest(); - let platform_version = PlatformVersion::latest(); + let epoch_change_fee_version_test: Lazy = + Lazy::new(|| BTreeMap::from([(0, FeeVersion::first())])); - let document_type = contract - .document_type_for_name("domain") - .expect("expected to get document type"); + let db_transaction = drive.grove.start_transaction(); - let db_transaction = drive.grove.start_transaction(); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 40, 208, 218, 141, 51, 7, 57, 5, 17, 42, 78, 70, 239, 65, 98, 146, 20, 42, 68, 135, + 241, 126, 28, 204, 213, 7, 128, 14, 31, 163, 15, 2, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + let all_names = [ + "".to_string(), + "".to_string(), + "".to_string(), + "".to_string(), + "".to_string(), + "".to_string(), + "Alexia".to_string(), + "Gerti".to_string(), + "Latisha".to_string(), + "Norry".to_string(), + ]; + + // A query getting all elements by firstName + + let query_value = json!({ + "where": [ + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let person_document_type = contract + .document_type_for_name("person") + .expect("contract should have a person document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + person_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, 
Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .clone() + .into_iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document + .get("firstName") + .map(|value| { + let first_name_value = value + .as_text() + .expect("the normalized label should be a string"); + String::from(first_name_value) + }) + .unwrap_or_default() + }) + .collect(); + + assert_eq!(names, all_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + let ids: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), person_document_type, platform_version) + .expect("we should be able to deserialize the document"); + base64::engine::general_purpose::STANDARD.encode(document.id().as_slice()) + }) + .collect(); + + for i in 0..10 { + drive + .delete_document_for_contract( + base64::engine::general_purpose::STANDARD + .decode(ids.get(i).unwrap()) + .expect("expected to decode from base64") + .try_into() + .expect("expected to get 32 bytes"), + &contract, + "person", + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + Some(&epoch_change_fee_version_test), + ) + .expect("expected to be able to delete the document"); + } - let mut rng = rand::rngs::StdRng::seed_from_u64(11456); + drive + .grove + .commit_transaction(db_transaction) + .unwrap() + .expect("unable to commit transaction"); + } - let domain0_id = Identifier::random_with_rng(&mut rng); - let domain0 = Domain { - id: domain0_id, - owner_id: Identifier::random_with_rng(&mut rng), - label: None, - normalized_label: None, - normalized_parent_domain_name: "dash".to_string(), - 
records: Records { - dash_unique_identity_id: Identifier::random_with_rng(&mut rng), - }, - preorder_salt: Bytes32::random_with_rng(&mut rng), - subdomain_rules: SubdomainRules { - allow_subdomains: false, - }, - }; + #[cfg(feature = "server")] + #[test] + fn test_query_with_cached_contract() { + let platform_version = PlatformVersion::latest(); + let (drive, contract) = setup_family_tests(10, 73509, platform_version); - let value0 = serde_json::to_value(domain0).expect("serialized domain"); - let document_cbor0 = cbor_serializer::serializable_value_to_cbor(&value0, Some(0)) - .expect("expected to serialize to cbor"); - let document0 = Document::from_cbor(document_cbor0.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + let db_transaction = drive.grove.start_transaction(); - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document0, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); + // Make sure the state is deterministic + let expected_app_hash = vec![ + 59, 253, 119, 177, 148, 100, 153, 121, 228, 238, 250, 185, 103, 53, 113, 8, 30, 192, + 75, 150, 153, 2, 24, 109, 93, 91, 97, 75, 106, 35, 29, 252, + ]; - let domain1_id = Identifier::random_with_rng(&mut rng); + assert_eq!(root_hash.as_slice(), expected_app_hash); - assert!(domain0_id > domain1_id); + // Make sure contract is not cached + let contract_ref = drive + .get_cached_contract_with_fetch_info( + *contract.id_ref().as_bytes(), + Some(&db_transaction), + 
&platform_version.drive, + ) + .expect("should return a contract ref"); - let domain1 = Domain { - id: domain1_id, - owner_id: Identifier::random_with_rng(&mut rng), - label: None, - normalized_label: None, - normalized_parent_domain_name: "dash".to_string(), - records: Records { - dash_unique_identity_id: Identifier::random_with_rng(&mut rng), - }, - preorder_salt: Bytes32::random_with_rng(&mut rng), - subdomain_rules: SubdomainRules { - allow_subdomains: false, - }, - }; + assert!(contract_ref.is_none()); - let value1 = serde_json::to_value(domain1).expect("serialized domain"); - let document_cbor1 = cbor_serializer::serializable_value_to_cbor(&value1, Some(0)) - .expect("expected to serialize to cbor"); - let document1 = Document::from_cbor(document_cbor1.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + // A query getting all elements by firstName - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + let query_value = json!({ + "where": [ + ], + "limit": 100, + "orderBy": [ + ["firstName", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document1, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), + let QuerySerializedDocumentsOutcome { items, .. 
} = drive + .query_documents_cbor_with_document_type_lookup( + where_cbor.as_slice(), + *contract.id_ref().as_bytes(), + "person", + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + + assert_eq!(items.len(), 10); + + // Cache was populated and there only two ref two the cached fetched info (here and cache) + let contract_ref = drive + .get_cached_contract_with_fetch_info( + *contract.id_ref().as_bytes(), + Some(&db_transaction), + &platform_version.drive, + ) + .expect("should return a contract ref") + .expect("expected a reference counter to the contract"); + + assert_eq!(Arc::strong_count(&contract_ref), 2); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_contract_verification() { + let platform_version = PlatformVersion::latest(); + let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + + let root_hash = drive + .grove + .root_hash(None, &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let contract_proof = drive + .prove_contract(contract.id().into_buffer(), None, platform_version) + .expect("expected to get proof"); + let (proof_root_hash, proof_returned_contract) = Drive::verify_contract( + contract_proof.as_slice(), + None, + false, + false, + contract.id().into_buffer(), platform_version, + ) + .expect("expected to get contract from proof"); + + assert_eq!(root_hash, proof_root_hash); + assert_eq!( + contract, + proof_returned_contract.expect("expected to get a contract") + ); + } + + #[test] + fn test_contract_keeps_history_fetch_and_verification() { + let (drive, contract) = setup_references_tests(10, 3334); + + let platform_version = PlatformVersion::latest(); + + let root_hash = drive + .grove + .root_hash(None, &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + drive + .fetch_contract( + contract.id().to_buffer(), + None, + None, + None, 
+ platform_version, + ) + .unwrap() + .expect("expected to be able to fetch a contract") + .expect("expected a contract to be present"); + + let contract_proof = drive + .prove_contract(contract.id().into_buffer(), None, platform_version) + .expect("expected to get proof"); + let (proof_root_hash, proof_returned_contract) = Drive::verify_contract( + contract_proof.as_slice(), None, + false, + false, + contract.id().into_buffer(), + platform_version, ) - .expect("document should be inserted"); + .expect("expected to get contract from proof"); - drive - .grove - .commit_transaction(db_transaction) - .unwrap() - .expect("transaction should be committed"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!( + contract, + proof_returned_contract.expect("expected to get a contract") + ); + } - let db_transaction = drive.grove.start_transaction(); + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_first_version() { + let platform_version = PlatformVersion::first(); + let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, &platform_version); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 11, 67, 98, 193, 214, 56, 66, 244, 193, 131, 252, 190, 5, 52, 29, 96, 160, 27, 222, 78, 91, - 150, 54, 85, 81, 249, 14, 74, 213, 181, 254, 120, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // let all_names = [ - // "".to_string(), x2 - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - // A query getting one element starting with a in dash parent domain asc - - let encoded_start_at = bs58::encode(domain0_id).into_string(); - - let query_value = json!({ - "where": [ 
- ["normalizedParentDomainName", "==", "dash"] - ], - "startAfter": encoded_start_at, - "limit": 3, - "orderBy": [ - ["normalizedLabel", "asc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - - // We are commenting this out on purpose to make it easier to find - // let mut query_operations: Vec = vec![]; - // let path_query = query - // .construct_path_query_operations(&drive, Some(&db_transaction), &mut query_operations) - // .expect("expected to construct a path query"); - // println!("{:#?}", path_query); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - let normalized_label_value = document - .get("normalizedLabel") - .cloned() - .unwrap_or(Value::Null); - if normalized_label_value.is_null() { - String::from("") - } else { + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 142, 246, 25, 166, 52, 184, 158, 102, 192, 111, 173, 255, 155, 125, 53, 233, 98, 241, + 201, 233, 2, 58, 47, 90, 209, 207, 147, 204, 83, 68, 183, 143, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + let all_names = [ + "amalle".to_string(), + "anna-diane".to_string(), + 
"atalanta".to_string(), + "eden".to_string(), + "laureen".to_string(), + "leone".to_string(), + "marilyn".to_string(), + "minna".to_string(), + "mora".to_string(), + "phillie".to_string(), + ]; + + // A query getting all elements by firstName + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "limit": 100, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); let normalized_label = normalized_label_value .as_text() .expect("the normalized label should be a string"); String::from(normalized_label) - } - }) - .collect(); + }) + .collect(); - let a_names = ["amalle".to_string(), "anna-diane".to_string()]; + assert_eq!(names, all_names); - assert_eq!(names, a_names); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, 
platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + // A query getting all elements starting with a in dash parent domain -#[cfg(feature = "server")] -#[test] -fn test_dpns_query_start_after_with_null_id_desc() { - // The point of this test is to test the situation where we have a start at inside an index with a null value - // While dpns doesn't really support this, other contracts might allow null values. - // We are just using the DPNS contract because it is handy. - let (drive, contract) = setup_dpns_tests_label_not_required(10, 11456); + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"], + ["normalizedLabel", "startsWith", "a"] + ], + "limit": 5, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = [ + "amalle".to_string(), + "anna-diane".to_string(), + 
"atalanta".to_string(), + ]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + let ids: Vec = results + .into_iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + hex::encode(document.id().as_slice()) + }) + .collect(); - let platform_version = PlatformVersion::latest(); + let a_ids = [ + "61978359176813a3e9b79c07df8addda2aea3841cfff2afe5b23cf1b5b926c1b".to_string(), + "0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080".to_string(), + "26a9344b6d0fcf8f525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a".to_string(), + ]; - let document_type = contract - .document_type_for_name("domain") - .expect("expected to get document type"); + assert_eq!(ids, a_ids); - let db_transaction = drive.grove.start_transaction(); + // A query getting one element starting with a in dash parent domain asc - let mut rng = rand::rngs::StdRng::seed_from_u64(11456); + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); - let domain0_id = Identifier::random_with_rng(&mut rng); - let domain0 = Domain { - id: domain0_id, - owner_id: Identifier::random_with_rng(&mut rng), - label: None, - normalized_label: None, - normalized_parent_domain_name: "dash".to_string(), - records: Records { - dash_unique_identity_id: Identifier::random_with_rng(&mut rng), - }, - preorder_salt: Bytes32::random_with_rng(&mut rng), - subdomain_rules: SubdomainRules { - allow_subdomains: false, - }, - }; + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"], + 
["normalizedLabel", "startsWith", "a"] + ], + "startAt": encoded_start_at, + "limit": 1, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); - let value0 = serde_json::to_value(domain0).expect("serialized domain"); - let document_cbor0 = cbor_serializer::serializable_value_to_cbor(&value0, Some(0)) - .expect("expected to serialize to cbor"); - let document0 = Document::from_cbor(document_cbor0.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + let a_names = ["anna-diane".to_string()]; - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + assert_eq!(names, a_names); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document0, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, 
- Some(&db_transaction), - platform_version, - None, + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + // A query getting one element starting with a in dash parent domain desc + + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"], + ["normalizedLabel", "startsWith", "a"] + ], + "startAt": encoded_start_at, + "limit": 1, + "orderBy": [ + ["normalizedLabel", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["anna-diane".to_string()]; + + assert_eq!(names, a_names); + + let 
(proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + let record_id_base68: Vec = results + .into_iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + + let records_value = document + .get("records") + .expect("we should be able to get the records"); + let map_records_value = records_value.as_map().expect("this should be a map"); + let record_dash_unique_identity_id = + Value::inner_optional_bytes_value(map_records_value, "dashUniqueIdentityId") + .unwrap() + .expect("there should be a dashUniqueIdentityId"); + bs58::encode(record_dash_unique_identity_id).into_string() + }) + .collect(); + + let a_record_id_base58 = ["5hXRj1xmmnNQ7RN1ATYym4x6bQugxcKn7FWiMnkQTQpF".to_string()]; + + assert_eq!(record_id_base68, a_record_id_base58); + + // A query getting elements by the identity desc + + let query_value = json!({ + "where": [ + ["records.dashUniqueIdentityId", "<=", "5hXRj1xmmnNQ7RN1ATYym4x6bQugxcKn7FWiMnkQTQpF"], + ], + "limit": 10, + "orderBy": [ + ["records.dashUniqueIdentityId", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + 
Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = [ + "anna-diane".to_string(), + "marilyn".to_string(), + "minna".to_string(), + ]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + // A query getting 2 elements asc by the identity + + let query_value = json!({ + "where": [ + ["records.dashUniqueIdentityId", "<=", "5hXRj1xmmnNQ7RN1ATYym4x6bQugxcKn7FWiMnkQTQpF"], + ], + "limit": 2, + "orderBy": [ + ["records.dashUniqueIdentityId", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); 
+ let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["minna".to_string(), "marilyn".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + // A query getting all elements + + let query_value = json!({ + "orderBy": [ + ["records.dashUniqueIdentityId", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + + assert_eq!(results.len(), 10); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_insertion_no_aliases() { + // using ascending order with rangeTo operators + let (drive, contract) = + setup_dpns_test_with_data("tests/supporting_files/contract/dpns/domains-no-alias.json"); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let query_value = json!({ + "orderBy": [["records.dashUniqueIdentityId", "desc"]], + }); + + let query_cbor = 
cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + + let result = drive + .query_documents_cbor_from_contract( + &contract, + domain_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("should perform query"); + + assert_eq!(result.0.len(), 15); + + let (proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + domain_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!( + drive + .grove + .root_hash(None, &platform_version.drive.grove_version) + .unwrap() + .expect("should get root hash"), + proof_root_hash + ); + assert_eq!(result.0, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_insertion_with_aliases() { + // using ascending order with rangeTo operators + let (drive, contract) = + setup_dpns_test_with_data("tests/supporting_files/contract/dpns/domains.json"); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let query_value = json!({ + "orderBy": [["records.dashUniqueIdentityId", "desc"]], + }); + + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + + let result = drive + .query_documents_cbor_from_contract( + &contract, + domain_document_type, + query_cbor.as_slice(), + None, + Some(&db_transaction), + Some(platform_version.protocol_version), + ) + .expect("should perform query"); + + assert_eq!(result.0.len(), 24); + + let 
(proof_root_hash, proof_results, _) = drive + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( + &contract, + domain_document_type, + query_cbor.as_slice(), + None, + None, + Some(platform_version.protocol_version), + ) + .expect("query should be executed"); + assert_eq!( + drive + .grove + .root_hash(None, &platform_version.drive.grove_version) + .unwrap() + .expect("should get root hash"), + proof_root_hash + ); + assert_eq!(result.0, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_at_first_version() { + let platform_version = PlatformVersion::first(); + // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. + let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 142, 246, 25, 166, 52, 184, 158, 102, 192, 111, 173, 255, 155, 125, 53, 233, 98, 241, + 201, 233, 2, 58, 47, 90, 209, 207, 147, 204, 83, 68, 183, 143, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash,); + + // let all_names = [ + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); + + let query_value = json!({ + "where": [ + 
["normalizedParentDomainName", "==", "dash"] + ], + "startAt": encoded_start_at, + "limit": 1, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["anna-diane".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_at_latest_version() { + let platform_version = PlatformVersion::latest(); + // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. 
+ let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 89, 134, 179, 83, 10, 119, 219, 251, 215, 151, 38, 111, 63, 245, 250, 229, 201, 136, + 190, 129, 75, 226, 88, 216, 93, 69, 152, 224, 156, 93, 170, 125, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash,); + + // let all_names = [ + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAt": encoded_start_at, + "limit": 1, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, ) - .expect("document should be inserted"); + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should 
be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["anna-diane".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_after() { + let platform_version = PlatformVersion::latest(); + // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. 
+ let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 89, 134, 179, 83, 10, 119, 219, 251, 215, 151, 38, 111, 63, 245, 250, 229, 201, 136, + 190, 129, 75, 226, 88, 216, 93, 69, 152, 224, 156, 93, 170, 125, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // let all_names = [ + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAfter": encoded_start_at, + "limit": 2, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = 
results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["atalanta".to_string(), "eden".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_at_desc() { + let platform_version = PlatformVersion::latest(); + // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. 
+ let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 89, 134, 179, 83, 10, 119, 219, 251, 215, 151, 38, 111, 63, 245, 250, 229, 201, 136, + 190, 129, 75, 226, 88, 216, 93, 69, 152, 224, 156, 93, 170, 125, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // let all_names = [ + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAt": encoded_start_at, + "limit": 2, + "orderBy": [ + ["normalizedLabel", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results 
+ .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["anna-diane".to_string(), "amalle".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_after_desc() { + let platform_version = PlatformVersion::latest(); + // The point of this test is to test the situation where we have a start at a certain value for the DPNS query. 
+ let (drive, contract) = setup_dpns_tests_with_batches(10, None, 11456, platform_version); + + let platform_version = PlatformVersion::latest(); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 89, 134, 179, 83, 10, 119, 219, 251, 215, 151, 38, 111, 63, 245, 250, 229, 201, 136, + 190, 129, 75, 226, 88, 216, 93, 69, 152, 224, 156, 93, 170, 125, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // let all_names = [ + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAfter": encoded_start_at, + "limit": 2, + "orderBy": [ + ["normalizedLabel", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = 
results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .expect("we should be able to get the first name"); + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .collect(); + + let a_names = ["amalle".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_at_with_null_id() { + // The point of this test is to test the situation where we have a start at inside an index with a null value + // While dpns doesn't really support this, other contracts might allow null values. + // We are just using the DPNS contract because it is handy. 
+ let (drive, contract) = setup_dpns_tests_label_not_required(10, 11456); + + let platform_version = PlatformVersion::latest(); + + let document_type = contract + .document_type_for_name("domain") + .expect("expected to get document type"); + + let db_transaction = drive.grove.start_transaction(); + + let mut rng = rand::rngs::StdRng::seed_from_u64(11456); + + let domain0_id = Identifier::random_with_rng(&mut rng); + let domain0 = Domain { + id: domain0_id, + owner_id: Identifier::random_with_rng(&mut rng), + label: None, + normalized_label: None, + normalized_parent_domain_name: "dash".to_string(), + records: Records { + dash_unique_identity_id: Identifier::random_with_rng(&mut rng), + }, + preorder_salt: Bytes32::random_with_rng(&mut rng), + subdomain_rules: SubdomainRules { + allow_subdomains: false, + }, + }; + + let value0 = platform_value::to_value(domain0).expect("serialized domain"); + let document0 = Document::from_platform_value(value0, platform_version) + .expect("document should be properly deserialized"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document0, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); + + let domain1_id = Identifier::random_with_rng(&mut rng); + + let domain1 = Domain { + id: domain1_id, + owner_id: Identifier::random_with_rng(&mut rng), + label: None, + normalized_label: None, + normalized_parent_domain_name: "dash".to_string(), + records: Records { + dash_unique_identity_id: Identifier::random_with_rng(&mut rng), + }, + preorder_salt: Bytes32::random_with_rng(&mut rng), + subdomain_rules: SubdomainRules { + allow_subdomains: false, + }, + }; + + let value1 = 
serde_json::to_value(domain1).expect("serialized domain"); + let document_cbor1 = cbor_serializer::serializable_value_to_cbor(&value1, Some(0)) + .expect("expected to serialize to cbor"); + let document1 = + Document::from_cbor(document_cbor1.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document1, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); + + drive + .grove + .commit_transaction(db_transaction) + .unwrap() + .expect("transaction should be committed"); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 236, 213, 202, 143, 111, 54, 250, 174, 57, 239, 156, 18, 122, 223, 88, 20, 13, 180, 89, + 144, 31, 20, 138, 189, 2, 148, 160, 95, 231, 108, 216, 163, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // let all_names = [ + // "".to_string(), x2 + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let encoded_start_at = bs58::encode(domain0_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAt": encoded_start_at, + "limit": 3, + 
"orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document + .get("normalizedLabel") + .map(|value| { + let normalized_label = value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .unwrap_or_default() + }) + .collect(); + + let a_names = [ + "".to_string(), + "amalle".to_string(), + "anna-diane".to_string(), + ]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_after_with_null_id() { + // The point of this test is to test the situation where we have a start at inside an index with a null value + // While dpns doesn't really support this, other contracts might allow null values. + // We are just using the DPNS contract because it is handy. 
+ let (drive, contract) = setup_dpns_tests_label_not_required(10, 11456); + + let platform_version = PlatformVersion::latest(); + + let document_type = contract + .document_type_for_name("domain") + .expect("expected to get document type"); + + let db_transaction = drive.grove.start_transaction(); + + let mut rng = rand::rngs::StdRng::seed_from_u64(11456); + + let domain0_id = Identifier::random_with_rng(&mut rng); + let domain0 = Domain { + id: domain0_id, + owner_id: Identifier::random_with_rng(&mut rng), + label: None, + normalized_label: None, + normalized_parent_domain_name: "dash".to_string(), + records: Records { + dash_unique_identity_id: Identifier::random_with_rng(&mut rng), + }, + preorder_salt: Bytes32::random_with_rng(&mut rng), + subdomain_rules: SubdomainRules { + allow_subdomains: false, + }, + }; + + let value0 = serde_json::to_value(domain0).expect("serialized domain"); + let document_cbor0 = cbor_serializer::serializable_value_to_cbor(&value0, Some(0)) + .expect("expected to serialize to cbor"); + let document0 = + Document::from_cbor(document_cbor0.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document0, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); + + let domain1_id = Identifier::random_with_rng(&mut rng); + + assert!(domain0_id > domain1_id); + + let domain1 = Domain { + id: domain1_id, + owner_id: Identifier::random_with_rng(&mut rng), + label: None, + normalized_label: None, + normalized_parent_domain_name: "dash".to_string(), + records: Records { + dash_unique_identity_id: 
Identifier::random_with_rng(&mut rng), + }, + preorder_salt: Bytes32::random_with_rng(&mut rng), + subdomain_rules: SubdomainRules { + allow_subdomains: false, + }, + }; + + let value1 = serde_json::to_value(domain1).expect("serialized domain"); + let document_cbor1 = cbor_serializer::serializable_value_to_cbor(&value1, Some(0)) + .expect("expected to serialize to cbor"); + let document1 = + Document::from_cbor(document_cbor1.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document1, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); + + drive + .grove + .commit_transaction(db_transaction) + .unwrap() + .expect("transaction should be committed"); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 236, 213, 202, 143, 111, 54, 250, 174, 57, 239, 156, 18, 122, 223, 88, 20, 13, 180, 89, + 144, 31, 20, 138, 189, 2, 148, 160, 95, 231, 108, 216, 163, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // let all_names = [ + // "".to_string(), x2 + // "amalle".to_string(), + // "anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + // A query getting one element starting with a in dash parent domain asc + + let encoded_start_at = 
bs58::encode(domain0_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAfter": encoded_start_at, + "limit": 3, + "orderBy": [ + ["normalizedLabel", "asc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + + // We are commenting this out on purpose to make it easier to find + // let mut query_operations: Vec = vec![]; + // let path_query = query + // .construct_path_query_operations(&drive, Some(&db_transaction), &mut query_operations) + // .expect("expected to construct a path query"); + // println!("{:#?}", path_query); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + let normalized_label_value = document + .get("normalizedLabel") + .cloned() + .unwrap_or(Value::Null); + if normalized_label_value.is_null() { + String::from("") + } else { + let normalized_label = normalized_label_value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + } + }) + .collect(); + + let a_names = ["amalle".to_string(), "anna-diane".to_string()]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, 
proof_root_hash); + assert_eq!(results, proof_results); + } + + #[cfg(feature = "server")] + #[test] + fn test_dpns_query_start_after_with_null_id_desc() { + // The point of this test is to test the situation where we have a start at inside an index with a null value + // While dpns doesn't really support this, other contracts might allow null values. + // We are just using the DPNS contract because it is handy. + let (drive, contract) = setup_dpns_tests_label_not_required(10, 11456); + + let platform_version = PlatformVersion::latest(); + + let document_type = contract + .document_type_for_name("domain") + .expect("expected to get document type"); + + let db_transaction = drive.grove.start_transaction(); + + let mut rng = rand::rngs::StdRng::seed_from_u64(11456); + + let domain0_id = Identifier::random_with_rng(&mut rng); + let domain0 = Domain { + id: domain0_id, + owner_id: Identifier::random_with_rng(&mut rng), + label: None, + normalized_label: None, + normalized_parent_domain_name: "dash".to_string(), + records: Records { + dash_unique_identity_id: Identifier::random_with_rng(&mut rng), + }, + preorder_salt: Bytes32::random_with_rng(&mut rng), + subdomain_rules: SubdomainRules { + allow_subdomains: false, + }, + }; + + let value0 = serde_json::to_value(domain0).expect("serialized domain"); + let document_cbor0 = cbor_serializer::serializable_value_to_cbor(&value0, Some(0)) + .expect("expected to serialize to cbor"); + let document0 = + Document::from_cbor(document_cbor0.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document0, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + 
None, + ) + .expect("document should be inserted"); + + let domain1_id = Identifier::random_with_rng(&mut rng); + + let domain1 = Domain { + id: domain1_id, + owner_id: Identifier::random_with_rng(&mut rng), + label: None, + normalized_label: None, + normalized_parent_domain_name: "dash".to_string(), + records: Records { + dash_unique_identity_id: Identifier::random_with_rng(&mut rng), + }, + preorder_salt: Bytes32::random_with_rng(&mut rng), + subdomain_rules: SubdomainRules { + allow_subdomains: false, + }, + }; + + let value1 = serde_json::to_value(domain1).expect("serialized domain"); + let document_cbor1 = cbor_serializer::serializable_value_to_cbor(&value1, Some(0)) + .expect("expected to serialize to cbor"); + let document1 = + Document::from_cbor(document_cbor1.as_slice(), None, None, platform_version) + .expect("document should be properly deserialized"); + + let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefInfo((&document1, storage_flags)), + owner_id: None, + }, + contract: &contract, + document_type, + }, + true, + BlockInfo::genesis(), + true, + Some(&db_transaction), + platform_version, + None, + ) + .expect("document should be inserted"); + + drive + .grove + .commit_transaction(db_transaction) + .unwrap() + .expect("transaction should be committed"); + + let db_transaction = drive.grove.start_transaction(); + + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 236, 213, 202, 143, 111, 54, 250, 174, 57, 239, 156, 18, 122, 223, 88, 20, 13, 180, 89, + 144, 31, 20, 138, 189, 2, 148, 160, 95, 231, 108, 216, 163, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash,); + + // let all_names = [ + // "".to_string(), x2 + // "amalle".to_string(), + // 
"anna-diane".to_string(), + // "atalanta".to_string(), + // "eden".to_string(), + // "laureen".to_string(), + // "leone".to_string(), + // "marilyn".to_string(), + // "minna".to_string(), + // "mora".to_string(), + // "phillie".to_string(), + // ]; + + assert_eq!( + hex::encode(domain0_id.as_slice()), + "8795eaa85e6f39a0d99ac8642a39e273204c57b1594dcd4f53f549fb5160fa32" + ); + assert_eq!( + hex::encode(domain1_id.as_slice()), + "0baa338e26a9344b6d0fcf8f525dfc160c160a7a52ef3301a7e55fccf41d7385" + ); + + // A query getting two elements starting with domain0 + // We should get domain0 only because we have an ascending order on the ids always + // And also because there is nothing below "" + let encoded_start_at = bs58::encode(domain0_id).into_string(); + + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAt": encoded_start_at, + "limit": 2, + "orderBy": [ + ["normalizedLabel", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let docs: Vec> = results + .clone() + .into_iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document.id().to_vec() + }) + .collect(); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + 
assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + + // The explanation is a little interesting + // domain1 is smaller than domain0 + // however on the lowest lever the order never matters, so we are always ascending on the id + // hence we will get domain1 + let expected_docs = [domain0_id.to_vec()]; + + assert_eq!(docs, expected_docs); - let domain1_id = Identifier::random_with_rng(&mut rng); + // A query getting two elements starting with domain1 + // We should get domain1, domain0 only because we have an ascending order on the ids always + let encoded_start_at = bs58::encode(domain1_id).into_string(); - let domain1 = Domain { - id: domain1_id, - owner_id: Identifier::random_with_rng(&mut rng), - label: None, - normalized_label: None, - normalized_parent_domain_name: "dash".to_string(), - records: Records { - dash_unique_identity_id: Identifier::random_with_rng(&mut rng), - }, - preorder_salt: Bytes32::random_with_rng(&mut rng), - subdomain_rules: SubdomainRules { - allow_subdomains: false, - }, - }; + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAt": encoded_start_at, + "limit": 2, + "orderBy": [ + ["normalizedLabel", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let docs: Vec> = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be 
able to deserialize the document"); + document.id().to_vec() + }) + .collect(); - let value1 = serde_json::to_value(domain1).expect("serialized domain"); - let document_cbor1 = cbor_serializer::serializable_value_to_cbor(&value1, Some(0)) - .expect("expected to serialize to cbor"); - let document1 = Document::from_cbor(document_cbor1.as_slice(), None, None, platform_version) - .expect("document should be properly deserialized"); + // The explanation is a little interesting + // domain1 is smaller than domain0 + // however on the lowest lever the order never matters, so we are always ascending on the id + // hence we will get domain1 + let expected_docs = [domain1_id.to_vec(), domain0_id.to_vec()]; - let storage_flags = Some(Cow::Owned(StorageFlags::SingleEpoch(0))); + assert_eq!(docs, expected_docs); + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); - drive - .add_document_for_contract( - DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentRefInfo((&document1, storage_flags)), - owner_id: None, - }, - contract: &contract, - document_type, - }, - true, - BlockInfo::genesis(), - true, - Some(&db_transaction), - platform_version, - None, - ) - .expect("document should be inserted"); + // A query getting one element starting with a in dash parent domain asc - drive - .grove - .commit_transaction(db_transaction) - .unwrap() - .expect("transaction should be committed"); + let anna_id = + hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") + .expect("expected to decode id"); + let encoded_start_at = bs58::encode(anna_id).into_string(); - let db_transaction = drive.grove.start_transaction(); + let query_value = json!({ + "where": [ + ["normalizedParentDomainName", "==", "dash"] + ], + "startAfter": 
encoded_start_at, + "limit": 2, + "orderBy": [ + ["normalizedLabel", "desc"] + ] + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("domain") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document + .get("normalizedLabel") + .map(|value| { + let normalized_label = value + .as_text() + .expect("the normalized label should be a string"); + String::from(normalized_label) + }) + .unwrap_or_default() + }) + .collect(); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 11, 67, 98, 193, 214, 56, 66, 244, 193, 131, 252, 190, 5, 52, 29, 96, 160, 27, 222, 78, 91, - 150, 54, 85, 81, 249, 14, 74, 213, 181, 254, 120, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash,); - - // let all_names = [ - // "".to_string(), x2 - // "amalle".to_string(), - // "anna-diane".to_string(), - // "atalanta".to_string(), - // "eden".to_string(), - // "laureen".to_string(), - // "leone".to_string(), - // "marilyn".to_string(), - // "minna".to_string(), - // "mora".to_string(), - // "phillie".to_string(), - // ]; - - assert_eq!( - hex::encode(domain0_id.as_slice()), - "8795eaa85e6f39a0d99ac8642a39e273204c57b1594dcd4f53f549fb5160fa32" - ); - assert_eq!( - 
hex::encode(domain1_id.as_slice()), - "0baa338e26a9344b6d0fcf8f525dfc160c160a7a52ef3301a7e55fccf41d7385" - ); + let a_names = ["amalle".to_string(), "".to_string()]; - // A query getting two elements starting with domain0 - // We should get domain0 only because we have an ascending order on the ids always - // And also because there is nothing below "" - let encoded_start_at = bs58::encode(domain0_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAt": encoded_start_at, - "limit": 2, - "orderBy": [ - ["normalizedLabel", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let docs: Vec> = results - .clone() - .into_iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document.id().to_vec() - }) - .collect(); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // The explanation is a little interesting - // domain1 is smaller than domain0 - // however on the lowest lever the order never matters, so we are always ascending on the id - // hence we will get domain1 - let expected_docs = [domain0_id.to_vec()]; - - assert_eq!(docs, 
expected_docs); - - // A query getting two elements starting with domain1 - // We should get domain1, domain0 only because we have an ascending order on the ids always - let encoded_start_at = bs58::encode(domain1_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAt": encoded_start_at, - "limit": 2, - "orderBy": [ - ["normalizedLabel", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let docs: Vec> = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document.id().to_vec() - }) - .collect(); - - // The explanation is a little interesting - // domain1 is smaller than domain0 - // however on the lowest lever the order never matters, so we are always ascending on the id - // hence we will get domain1 - let expected_docs = [domain1_id.to_vec(), domain0_id.to_vec()]; - - assert_eq!(docs, expected_docs); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); - - // A query getting one element starting with a in dash parent domain asc - - let anna_id = 
hex::decode("0e97eb86ceca4309751616089336a127a5d48282712473b2d0fc5663afb1a080") - .expect("expected to decode id"); - let encoded_start_at = bs58::encode(anna_id).into_string(); - - let query_value = json!({ - "where": [ - ["normalizedParentDomainName", "==", "dash"] - ], - "startAfter": encoded_start_at, - "limit": 2, - "orderBy": [ - ["normalizedLabel", "desc"] - ] - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("domain") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document - .get("normalizedLabel") - .map(|value| { - let normalized_label = value - .as_text() - .expect("the normalized label should be a string"); - String::from(normalized_label) - }) - .unwrap_or_default() - }) - .collect(); + assert_eq!(names, a_names); - let a_names = ["amalle".to_string(), "".to_string()]; + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - assert_eq!(names, a_names); + #[cfg(feature = "server")] + #[test] + fn test_withdrawals_query_by_owner_id() { + // We create 10 withdrawals owned by 2 identities + let (drive, contract) = setup_withdrawal_tests(10, Some(2), 11456); - let 
(proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + let platform_version = PlatformVersion::latest(); -#[cfg(feature = "server")] -#[test] -fn test_withdrawals_query_by_owner_id() { - // We create 10 withdrawals owned by 2 identities - let (drive, contract) = setup_withdrawal_tests(10, Some(2), 11456); + let db_transaction = drive.grove.start_transaction(); - let platform_version = PlatformVersion::latest(); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 174, 178, 50, 69, 201, 231, 248, 75, 88, 168, 83, 29, 141, 40, 117, 63, 157, 205, 24, + 56, 113, 108, 224, 27, 225, 24, 134, 153, 157, 130, 80, 200, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // Document Ids are + // document v0 : id:2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:09 updated_at:2024-11-21 12:31:09 amount:(i64)646767 coreFeePerByte:(i64)0 outputScript:bytes 00952c808390e575c8dd29fc07ccfed7b428e1ec2ffcb23e pooling:(i64)0 status:(i64)1 transactionIndex:(i64)4 transactionSignHeight:(i64)303186 + // document v0 : id:3T4aKmidGKA4ETnWYSedm6ETzrcdkfPL2r3D6eg6CSib owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)971045 coreFeePerByte:(i64)0 outputScript:bytes 525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a4d pooling:(i64)0 status:(i64)1 transactionIndex:(i64)2 transactionSignHeight:(i64)248787 + // document v0 : id:3X2QfUfR8EeVZQAKmEjcue5xDv3CZXrfPTgXkZ5vQo13 owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 
amount:(i64)122155 coreFeePerByte:(i64)0 outputScript:bytes f76eb8b953ff41040d906c25a4ae42884bedb41a07fc3a pooling:(i64)0 status:(i64)3 transactionIndex:(i64)7 transactionSignHeight:(i64)310881 + // document v0 : id:5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:30:59 updated_at:2024-11-21 12:30:59 amount:(i64)725014 coreFeePerByte:(i64)0 outputScript:bytes 51f203a755a7ff25ba8645841f80403ee98134690b2c0dd5e2 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)1 transactionSignHeight:(i64)4072 + // document v0 : id:74giZJn9fNczYRsxxh3wVnktJS1vzTiRWYinKK1rRcyj owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)151943 coreFeePerByte:(i64)0 outputScript:bytes 9db03f4c8a51e4e9855e008aae6121911b4831699c53ed pooling:(i64)0 status:(i64)1 transactionIndex:(i64)5 transactionSignHeight:(i64)343099 + // document v0 : id:8iqDAFxTzHYcmUWtcNnCRoj9Fss4HE1G3GP3HhVAZJhn owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:13 updated_at:2024-11-21 12:31:13 amount:(i64)409642 coreFeePerByte:(i64)0 outputScript:bytes 19fe0a2458a47e1726191f4dc94d11bcfacf821d024043 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)8 transactionSignHeight:(i64)304397 + // document v0 : id:BdH274iP17nhquQVY4KMCAM6nwyPRc8AFJkUT91vxhbc owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:03 updated_at:2024-11-21 12:31:03 amount:(i64)81005 coreFeePerByte:(i64)0 outputScript:bytes 2666e87b6cc7ddf2b63e7e52c348818c05e5562efa48f5 pooling:(i64)0 status:(i64)0 + // document v0 : id:CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)455074 coreFeePerByte:(i64)0 outputScript:bytes acde2e1652771b50a2c68fd330ee1d4b8e115631ce72375432 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)3 
transactionSignHeight:(i64)261103 + // document v0 : id:DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:05 updated_at:2024-11-21 12:31:05 amount:(i64)271303 coreFeePerByte:(i64)0 outputScript:bytes 0b845e8c3a4679f1913172f7fd939cc153f458519de8ed3d pooling:(i64)0 status:(i64)0 + // document v0 : id:FDnvFN7e72LcZEojTWNmJTP7uzok3BtvbKnaa5gjqCpW owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)123433 coreFeePerByte:(i64)0 outputScript:bytes 82712473b2d0fc5663afb1a08006913ccccbf38e091a8cc7 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)6 transactionSignHeight:(i64)319518 + + let query_value = json!({ + "where": [ + ["$ownerId", "==", "A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ"] + ], + "limit": 2 + }); + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("withdrawal") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document.id().to_string(Encoding::Base58) + }) + .collect(); - let db_transaction = drive.grove.start_transaction(); + let a_names = [ + "5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV".to_string(), + "CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD".to_string(), + ]; - let root_hash = drive - .grove - 
.root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 27, 172, 224, 121, 173, 248, 170, 255, 202, 209, 7, 83, 203, 185, 208, 102, 157, 75, 55, - 21, 217, 212, 91, 174, 144, 146, 135, 92, 97, 156, 166, 6, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // Document Ids are - // document v0 : id:2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:09 updated_at:2024-11-21 12:31:09 amount:(i64)646767 coreFeePerByte:(i64)0 outputScript:bytes 00952c808390e575c8dd29fc07ccfed7b428e1ec2ffcb23e pooling:(i64)0 status:(i64)1 transactionIndex:(i64)4 transactionSignHeight:(i64)303186 - // document v0 : id:3T4aKmidGKA4ETnWYSedm6ETzrcdkfPL2r3D6eg6CSib owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)971045 coreFeePerByte:(i64)0 outputScript:bytes 525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a4d pooling:(i64)0 status:(i64)1 transactionIndex:(i64)2 transactionSignHeight:(i64)248787 - // document v0 : id:3X2QfUfR8EeVZQAKmEjcue5xDv3CZXrfPTgXkZ5vQo13 owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)122155 coreFeePerByte:(i64)0 outputScript:bytes f76eb8b953ff41040d906c25a4ae42884bedb41a07fc3a pooling:(i64)0 status:(i64)3 transactionIndex:(i64)7 transactionSignHeight:(i64)310881 - // document v0 : id:5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:30:59 updated_at:2024-11-21 12:30:59 amount:(i64)725014 coreFeePerByte:(i64)0 outputScript:bytes 51f203a755a7ff25ba8645841f80403ee98134690b2c0dd5e2 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)1 transactionSignHeight:(i64)4072 - // document v0 : id:74giZJn9fNczYRsxxh3wVnktJS1vzTiRWYinKK1rRcyj 
owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)151943 coreFeePerByte:(i64)0 outputScript:bytes 9db03f4c8a51e4e9855e008aae6121911b4831699c53ed pooling:(i64)0 status:(i64)1 transactionIndex:(i64)5 transactionSignHeight:(i64)343099 - // document v0 : id:8iqDAFxTzHYcmUWtcNnCRoj9Fss4HE1G3GP3HhVAZJhn owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:13 updated_at:2024-11-21 12:31:13 amount:(i64)409642 coreFeePerByte:(i64)0 outputScript:bytes 19fe0a2458a47e1726191f4dc94d11bcfacf821d024043 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)8 transactionSignHeight:(i64)304397 - // document v0 : id:BdH274iP17nhquQVY4KMCAM6nwyPRc8AFJkUT91vxhbc owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:03 updated_at:2024-11-21 12:31:03 amount:(i64)81005 coreFeePerByte:(i64)0 outputScript:bytes 2666e87b6cc7ddf2b63e7e52c348818c05e5562efa48f5 pooling:(i64)0 status:(i64)0 - // document v0 : id:CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)455074 coreFeePerByte:(i64)0 outputScript:bytes acde2e1652771b50a2c68fd330ee1d4b8e115631ce72375432 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)3 transactionSignHeight:(i64)261103 - // document v0 : id:DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:05 updated_at:2024-11-21 12:31:05 amount:(i64)271303 coreFeePerByte:(i64)0 outputScript:bytes 0b845e8c3a4679f1913172f7fd939cc153f458519de8ed3d pooling:(i64)0 status:(i64)0 - // document v0 : id:FDnvFN7e72LcZEojTWNmJTP7uzok3BtvbKnaa5gjqCpW owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)123433 coreFeePerByte:(i64)0 outputScript:bytes 82712473b2d0fc5663afb1a08006913ccccbf38e091a8cc7 
pooling:(i64)0 status:(i64)4 transactionIndex:(i64)6 transactionSignHeight:(i64)319518 - - let query_value = json!({ - "where": [ - ["$ownerId", "==", "A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ"] - ], - "limit": 2 - }); - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("withdrawal") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document.id().to_string(Encoding::Base58) - }) - .collect(); + assert_eq!(names, a_names); - let a_names = [ - "5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV".to_string(), - "CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD".to_string(), - ]; + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - assert_eq!(names, a_names); + #[cfg(feature = "server")] + #[test] + fn test_withdrawals_query_start_after_query_by_owner_id() { + // We create 10 withdrawals owned by 2 identities + let (drive, contract) = setup_withdrawal_tests(10, Some(2), 11456); - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - 
assert_eq!(results, proof_results); -} + let platform_version = PlatformVersion::latest(); -#[cfg(feature = "server")] -#[test] -fn test_withdrawals_query_start_after_query_by_owner_id() { - // We create 10 withdrawals owned by 2 identities - let (drive, contract) = setup_withdrawal_tests(10, Some(2), 11456); + let db_transaction = drive.grove.start_transaction(); - let platform_version = PlatformVersion::latest(); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 174, 178, 50, 69, 201, 231, 248, 75, 88, 168, 83, 29, 141, 40, 117, 63, 157, 205, 24, + 56, 113, 108, 224, 27, 225, 24, 134, 153, 157, 130, 80, 200, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // Document Ids are + // document v0 : id:2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:09 updated_at:2024-11-21 12:31:09 amount:(i64)646767 coreFeePerByte:(i64)0 outputScript:bytes 00952c808390e575c8dd29fc07ccfed7b428e1ec2ffcb23e pooling:(i64)0 status:(i64)1 transactionIndex:(i64)4 transactionSignHeight:(i64)303186 + // document v0 : id:3T4aKmidGKA4ETnWYSedm6ETzrcdkfPL2r3D6eg6CSib owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)971045 coreFeePerByte:(i64)0 outputScript:bytes 525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a4d pooling:(i64)0 status:(i64)1 transactionIndex:(i64)2 transactionSignHeight:(i64)248787 + // document v0 : id:3X2QfUfR8EeVZQAKmEjcue5xDv3CZXrfPTgXkZ5vQo13 owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)122155 coreFeePerByte:(i64)0 outputScript:bytes f76eb8b953ff41040d906c25a4ae42884bedb41a07fc3a pooling:(i64)0 status:(i64)3 transactionIndex:(i64)7 transactionSignHeight:(i64)310881 + // 
document v0 : id:5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:30:59 updated_at:2024-11-21 12:30:59 amount:(i64)725014 coreFeePerByte:(i64)0 outputScript:bytes 51f203a755a7ff25ba8645841f80403ee98134690b2c0dd5e2 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)1 transactionSignHeight:(i64)4072 + // document v0 : id:74giZJn9fNczYRsxxh3wVnktJS1vzTiRWYinKK1rRcyj owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)151943 coreFeePerByte:(i64)0 outputScript:bytes 9db03f4c8a51e4e9855e008aae6121911b4831699c53ed pooling:(i64)0 status:(i64)1 transactionIndex:(i64)5 transactionSignHeight:(i64)343099 + // document v0 : id:8iqDAFxTzHYcmUWtcNnCRoj9Fss4HE1G3GP3HhVAZJhn owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:13 updated_at:2024-11-21 12:31:13 amount:(i64)409642 coreFeePerByte:(i64)0 outputScript:bytes 19fe0a2458a47e1726191f4dc94d11bcfacf821d024043 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)8 transactionSignHeight:(i64)304397 + // document v0 : id:BdH274iP17nhquQVY4KMCAM6nwyPRc8AFJkUT91vxhbc owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:03 updated_at:2024-11-21 12:31:03 amount:(i64)81005 coreFeePerByte:(i64)0 outputScript:bytes 2666e87b6cc7ddf2b63e7e52c348818c05e5562efa48f5 pooling:(i64)0 status:(i64)0 + // document v0 : id:CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)455074 coreFeePerByte:(i64)0 outputScript:bytes acde2e1652771b50a2c68fd330ee1d4b8e115631ce72375432 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)3 transactionSignHeight:(i64)261103 + // document v0 : id:DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:05 
updated_at:2024-11-21 12:31:05 amount:(i64)271303 coreFeePerByte:(i64)0 outputScript:bytes 0b845e8c3a4679f1913172f7fd939cc153f458519de8ed3d pooling:(i64)0 status:(i64)0 + // document v0 : id:FDnvFN7e72LcZEojTWNmJTP7uzok3BtvbKnaa5gjqCpW owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)123433 coreFeePerByte:(i64)0 outputScript:bytes 82712473b2d0fc5663afb1a08006913ccccbf38e091a8cc7 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)6 transactionSignHeight:(i64)319518 + + let query_value = json!({ + "where": [ + ["$ownerId", "==", "A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ"] + ], + "startAfter": "CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD", + "limit": 3, + }); + + // This will use the identity recent index + // { + // "name": "identityRecent", + // "properties": [ + // { + // "$ownerId": "asc" + // }, + // { + // "$updatedAt": "asc" + // }, + // { + // "status": "asc" + // } + // ], + // "unique": false + // }, + + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("withdrawal") + .expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document.id().to_string(Encoding::Base58) + }) + .collect(); + + // We only get back 2 values, even though we put limit 3 because the time with status 0 is an + // empty tree and consumes a 
limit + let a_names = [ + "DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou".to_string(), + "2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu".to_string(), + ]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - let db_transaction = drive.grove.start_transaction(); + #[cfg(feature = "server")] + #[test] + fn test_withdrawals_query_start_after_query_by_owner_id_desc() { + // We create 10 withdrawals owned by 2 identities + let (drive, contract) = setup_withdrawal_tests(10, Some(2), 11456); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 27, 172, 224, 121, 173, 248, 170, 255, 202, 209, 7, 83, 203, 185, 208, 102, 157, 75, 55, - 21, 217, 212, 91, 174, 144, 146, 135, 92, 97, 156, 166, 6, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // Document Ids are - // document v0 : id:2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:09 updated_at:2024-11-21 12:31:09 amount:(i64)646767 coreFeePerByte:(i64)0 outputScript:bytes 00952c808390e575c8dd29fc07ccfed7b428e1ec2ffcb23e pooling:(i64)0 status:(i64)1 transactionIndex:(i64)4 transactionSignHeight:(i64)303186 - // document v0 : id:3T4aKmidGKA4ETnWYSedm6ETzrcdkfPL2r3D6eg6CSib owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)971045 coreFeePerByte:(i64)0 outputScript:bytes 525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a4d pooling:(i64)0 status:(i64)1 transactionIndex:(i64)2 transactionSignHeight:(i64)248787 - // document v0 : id:3X2QfUfR8EeVZQAKmEjcue5xDv3CZXrfPTgXkZ5vQo13 
owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)122155 coreFeePerByte:(i64)0 outputScript:bytes f76eb8b953ff41040d906c25a4ae42884bedb41a07fc3a pooling:(i64)0 status:(i64)3 transactionIndex:(i64)7 transactionSignHeight:(i64)310881 - // document v0 : id:5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:30:59 updated_at:2024-11-21 12:30:59 amount:(i64)725014 coreFeePerByte:(i64)0 outputScript:bytes 51f203a755a7ff25ba8645841f80403ee98134690b2c0dd5e2 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)1 transactionSignHeight:(i64)4072 - // document v0 : id:74giZJn9fNczYRsxxh3wVnktJS1vzTiRWYinKK1rRcyj owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)151943 coreFeePerByte:(i64)0 outputScript:bytes 9db03f4c8a51e4e9855e008aae6121911b4831699c53ed pooling:(i64)0 status:(i64)1 transactionIndex:(i64)5 transactionSignHeight:(i64)343099 - // document v0 : id:8iqDAFxTzHYcmUWtcNnCRoj9Fss4HE1G3GP3HhVAZJhn owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:13 updated_at:2024-11-21 12:31:13 amount:(i64)409642 coreFeePerByte:(i64)0 outputScript:bytes 19fe0a2458a47e1726191f4dc94d11bcfacf821d024043 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)8 transactionSignHeight:(i64)304397 - // document v0 : id:BdH274iP17nhquQVY4KMCAM6nwyPRc8AFJkUT91vxhbc owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:03 updated_at:2024-11-21 12:31:03 amount:(i64)81005 coreFeePerByte:(i64)0 outputScript:bytes 2666e87b6cc7ddf2b63e7e52c348818c05e5562efa48f5 pooling:(i64)0 status:(i64)0 - // document v0 : id:CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)455074 coreFeePerByte:(i64)0 
outputScript:bytes acde2e1652771b50a2c68fd330ee1d4b8e115631ce72375432 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)3 transactionSignHeight:(i64)261103 - // document v0 : id:DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:05 updated_at:2024-11-21 12:31:05 amount:(i64)271303 coreFeePerByte:(i64)0 outputScript:bytes 0b845e8c3a4679f1913172f7fd939cc153f458519de8ed3d pooling:(i64)0 status:(i64)0 - // document v0 : id:FDnvFN7e72LcZEojTWNmJTP7uzok3BtvbKnaa5gjqCpW owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)123433 coreFeePerByte:(i64)0 outputScript:bytes 82712473b2d0fc5663afb1a08006913ccccbf38e091a8cc7 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)6 transactionSignHeight:(i64)319518 - - let query_value = json!({ - "where": [ - ["$ownerId", "==", "A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ"] - ], - "startAfter": "CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD", - "limit": 3, - }); - - // This will use the identity recent index - // { - // "name": "identityRecent", - // "properties": [ - // { - // "$ownerId": "asc" - // }, - // { - // "$updatedAt": "asc" - // }, - // { - // "status": "asc" - // } - // ], - // "unique": false - // }, - - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("withdrawal") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - 
Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document.id().to_string(Encoding::Base58) - }) - .collect(); - - // We only get back 2 values, even though we put limit 3 because the time with status 0 is an - // empty tree and consumes a limit - let a_names = [ - "DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou".to_string(), - "2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu".to_string(), - ]; - - assert_eq!(names, a_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + let platform_version = PlatformVersion::latest(); -#[cfg(feature = "server")] -#[test] -fn test_withdrawals_query_start_after_query_by_owner_id_desc() { - // We create 10 withdrawals owned by 2 identities - let (drive, contract) = setup_withdrawal_tests(10, Some(2), 11456); + let db_transaction = drive.grove.start_transaction(); - let platform_version = PlatformVersion::latest(); + let root_hash = drive + .grove + .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) + .unwrap() + .expect("there is always a root hash"); + + let expected_app_hash = vec![ + 174, 178, 50, 69, 201, 231, 248, 75, 88, 168, 83, 29, 141, 40, 117, 63, 157, 205, 24, + 56, 113, 108, 224, 27, 225, 24, 134, 153, 157, 130, 80, 200, + ]; + + assert_eq!(root_hash.as_slice(), expected_app_hash); + + // Document Ids are + // document v0 : id:2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:09 updated_at:2024-11-21 12:31:09 amount:(i64)646767 coreFeePerByte:(i64)0 outputScript:bytes 00952c808390e575c8dd29fc07ccfed7b428e1ec2ffcb23e pooling:(i64)0 status:(i64)1 transactionIndex:(i64)4 transactionSignHeight:(i64)303186 + // document v0 : 
id:3T4aKmidGKA4ETnWYSedm6ETzrcdkfPL2r3D6eg6CSib owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)971045 coreFeePerByte:(i64)0 outputScript:bytes 525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a4d pooling:(i64)0 status:(i64)1 transactionIndex:(i64)2 transactionSignHeight:(i64)248787 + // document v0 : id:3X2QfUfR8EeVZQAKmEjcue5xDv3CZXrfPTgXkZ5vQo13 owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)122155 coreFeePerByte:(i64)0 outputScript:bytes f76eb8b953ff41040d906c25a4ae42884bedb41a07fc3a pooling:(i64)0 status:(i64)3 transactionIndex:(i64)7 transactionSignHeight:(i64)310881 + // document v0 : id:5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:30:59 updated_at:2024-11-21 12:30:59 amount:(i64)725014 coreFeePerByte:(i64)0 outputScript:bytes 51f203a755a7ff25ba8645841f80403ee98134690b2c0dd5e2 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)1 transactionSignHeight:(i64)4072 + // document v0 : id:74giZJn9fNczYRsxxh3wVnktJS1vzTiRWYinKK1rRcyj owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)151943 coreFeePerByte:(i64)0 outputScript:bytes 9db03f4c8a51e4e9855e008aae6121911b4831699c53ed pooling:(i64)0 status:(i64)1 transactionIndex:(i64)5 transactionSignHeight:(i64)343099 + // document v0 : id:8iqDAFxTzHYcmUWtcNnCRoj9Fss4HE1G3GP3HhVAZJhn owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:13 updated_at:2024-11-21 12:31:13 amount:(i64)409642 coreFeePerByte:(i64)0 outputScript:bytes 19fe0a2458a47e1726191f4dc94d11bcfacf821d024043 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)8 transactionSignHeight:(i64)304397 + // document v0 : id:BdH274iP17nhquQVY4KMCAM6nwyPRc8AFJkUT91vxhbc owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh 
created_at:2024-11-21 12:31:03 updated_at:2024-11-21 12:31:03 amount:(i64)81005 coreFeePerByte:(i64)0 outputScript:bytes 2666e87b6cc7ddf2b63e7e52c348818c05e5562efa48f5 pooling:(i64)0 status:(i64)0 + // document v0 : id:CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)455074 coreFeePerByte:(i64)0 outputScript:bytes acde2e1652771b50a2c68fd330ee1d4b8e115631ce72375432 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)3 transactionSignHeight:(i64)261103 + // document v0 : id:DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:05 updated_at:2024-11-21 12:31:05 amount:(i64)271303 coreFeePerByte:(i64)0 outputScript:bytes 0b845e8c3a4679f1913172f7fd939cc153f458519de8ed3d pooling:(i64)0 status:(i64)0 + // document v0 : id:FDnvFN7e72LcZEojTWNmJTP7uzok3BtvbKnaa5gjqCpW owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)123433 coreFeePerByte:(i64)0 outputScript:bytes 82712473b2d0fc5663afb1a08006913ccccbf38e091a8cc7 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)6 transactionSignHeight:(i64)319518 + + let query_value = json!({ + "where": [ + ["$ownerId", "==", "A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ"] + ], + "startAfter": "2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu", + "limit": 3, + "orderBy": [ + ["$updatedAt", "desc"] + ] + }); + + // This will use the identity recent index + // { + // "name": "identityRecent", + // "properties": [ + // { + // "$ownerId": "asc" + // }, + // { + // "$updatedAt": "asc" + // }, + // { + // "status": "asc" + // } + // ], + // "unique": false + // }, + + let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) + .expect("expected to serialize to cbor"); + let domain_document_type = contract + .document_type_for_name("withdrawal") + 
.expect("contract should have a domain document type"); + let query = DriveDocumentQuery::from_cbor( + where_cbor.as_slice(), + &contract, + domain_document_type, + &drive.config, + ) + .expect("query should be built"); + let (results, _, _) = query + .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) + .expect("proof should be executed"); + let names: Vec = results + .iter() + .map(|result| { + let document = + Document::from_bytes(result.as_slice(), domain_document_type, platform_version) + .expect("we should be able to deserialize the document"); + document.id().to_string(Encoding::Base58) + }) + .collect(); + + // We only get back 2 values, even though we put limit 3 because the time with status 0 is an + // empty tree and consumes a limit + let a_names = [ + "DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou".to_string(), + "CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD".to_string(), + ]; + + assert_eq!(names, a_names); + + let (proof_root_hash, proof_results, _) = query + .execute_with_proof_only_get_elements(&drive, None, None, platform_version) + .expect("we should be able to a proof"); + assert_eq!(root_hash, proof_root_hash); + assert_eq!(results, proof_results); + } - let db_transaction = drive.grove.start_transaction(); + #[cfg(feature = "server")] + #[test] + fn test_query_a_b_c_d_e_contract() { + let drive: Drive = setup_drive_with_initial_state_structure(None); - let root_hash = drive - .grove - .root_hash(Some(&db_transaction), &platform_version.drive.grove_version) - .unwrap() - .expect("there is always a root hash"); - - let expected_app_hash = vec![ - 27, 172, 224, 121, 173, 248, 170, 255, 202, 209, 7, 83, 203, 185, 208, 102, 157, 75, 55, - 21, 217, 212, 91, 174, 144, 146, 135, 92, 97, 156, 166, 6, - ]; - - assert_eq!(root_hash.as_slice(), expected_app_hash); - - // Document Ids are - // document v0 : id:2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ 
created_at:2024-11-21 12:31:09 updated_at:2024-11-21 12:31:09 amount:(i64)646767 coreFeePerByte:(i64)0 outputScript:bytes 00952c808390e575c8dd29fc07ccfed7b428e1ec2ffcb23e pooling:(i64)0 status:(i64)1 transactionIndex:(i64)4 transactionSignHeight:(i64)303186 - // document v0 : id:3T4aKmidGKA4ETnWYSedm6ETzrcdkfPL2r3D6eg6CSib owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)971045 coreFeePerByte:(i64)0 outputScript:bytes 525dfc160c160a7a52ef3301a7e55fccf41d73857f50a55a4d pooling:(i64)0 status:(i64)1 transactionIndex:(i64)2 transactionSignHeight:(i64)248787 - // document v0 : id:3X2QfUfR8EeVZQAKmEjcue5xDv3CZXrfPTgXkZ5vQo13 owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)122155 coreFeePerByte:(i64)0 outputScript:bytes f76eb8b953ff41040d906c25a4ae42884bedb41a07fc3a pooling:(i64)0 status:(i64)3 transactionIndex:(i64)7 transactionSignHeight:(i64)310881 - // document v0 : id:5ikeRNwvFekr6ex32B4dLEcCaSsgXXHJBx5rJ2rwuhEV owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:30:59 updated_at:2024-11-21 12:30:59 amount:(i64)725014 coreFeePerByte:(i64)0 outputScript:bytes 51f203a755a7ff25ba8645841f80403ee98134690b2c0dd5e2 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)1 transactionSignHeight:(i64)4072 - // document v0 : id:74giZJn9fNczYRsxxh3wVnktJS1vzTiRWYinKK1rRcyj owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)151943 coreFeePerByte:(i64)0 outputScript:bytes 9db03f4c8a51e4e9855e008aae6121911b4831699c53ed pooling:(i64)0 status:(i64)1 transactionIndex:(i64)5 transactionSignHeight:(i64)343099 - // document v0 : id:8iqDAFxTzHYcmUWtcNnCRoj9Fss4HE1G3GP3HhVAZJhn owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:13 updated_at:2024-11-21 12:31:13 amount:(i64)409642 coreFeePerByte:(i64)0 
outputScript:bytes 19fe0a2458a47e1726191f4dc94d11bcfacf821d024043 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)8 transactionSignHeight:(i64)304397 - // document v0 : id:BdH274iP17nhquQVY4KMCAM6nwyPRc8AFJkUT91vxhbc owner_id:CH1EHBkN5FUuQ7z8ep1abroLPzzYjagvM5XV2NYR3DEh created_at:2024-11-21 12:31:03 updated_at:2024-11-21 12:31:03 amount:(i64)81005 coreFeePerByte:(i64)0 outputScript:bytes 2666e87b6cc7ddf2b63e7e52c348818c05e5562efa48f5 pooling:(i64)0 status:(i64)0 - // document v0 : id:CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:01 updated_at:2024-11-21 12:31:01 amount:(i64)455074 coreFeePerByte:(i64)0 outputScript:bytes acde2e1652771b50a2c68fd330ee1d4b8e115631ce72375432 pooling:(i64)0 status:(i64)3 transactionIndex:(i64)3 transactionSignHeight:(i64)261103 - // document v0 : id:DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:05 updated_at:2024-11-21 12:31:05 amount:(i64)271303 coreFeePerByte:(i64)0 outputScript:bytes 0b845e8c3a4679f1913172f7fd939cc153f458519de8ed3d pooling:(i64)0 status:(i64)0 - // document v0 : id:FDnvFN7e72LcZEojTWNmJTP7uzok3BtvbKnaa5gjqCpW owner_id:A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ created_at:2024-11-21 12:31:11 updated_at:2024-11-21 12:31:11 amount:(i64)123433 coreFeePerByte:(i64)0 outputScript:bytes 82712473b2d0fc5663afb1a08006913ccccbf38e091a8cc7 pooling:(i64)0 status:(i64)4 transactionIndex:(i64)6 transactionSignHeight:(i64)319518 - - let query_value = json!({ - "where": [ - ["$ownerId", "==", "A8GdKdMT7eDvtjnmMXe1Z3YaTtJzZdxNDRkeLb8goFrZ"] - ], - "startAfter": "2kTB6gW4wCCnySj3UFUJQM3aUYBd6qDfLCY74BnWmFKu", - "limit": 3, - "orderBy": [ - ["$updatedAt", "desc"] - ] - }); - - // This will use the identity recent index - // { - // "name": "identityRecent", - // "properties": [ - // { - // "$ownerId": "asc" - // }, - // { - // "$updatedAt": "asc" - // }, - // { - // 
"status": "asc" - // } - // ], - // "unique": false - // }, - - let where_cbor = cbor_serializer::serializable_value_to_cbor(&query_value, None) - .expect("expected to serialize to cbor"); - let domain_document_type = contract - .document_type_for_name("withdrawal") - .expect("contract should have a domain document type"); - let query = DriveDocumentQuery::from_cbor( - where_cbor.as_slice(), - &contract, - domain_document_type, - &drive.config, - ) - .expect("query should be built"); - let (results, _, _) = query - .execute_raw_results_no_proof(&drive, None, Some(&db_transaction), platform_version) - .expect("proof should be executed"); - let names: Vec = results - .iter() - .map(|result| { - let document = - Document::from_bytes(result.as_slice(), domain_document_type, platform_version) - .expect("we should be able to deserialize the document"); - document.id().to_string(Encoding::Base58) - }) - .collect(); - - // We only get back 2 values, even though we put limit 3 because the time with status 0 is an - // empty tree and consumes a limit - let a_names = [ - "DxFzXvkb2mNQHmeVknsv3gWsc6rMtLk9AsS5zMpy6hou".to_string(), - "CCjaU67Pe79Vt51oXvQ5SkyNiypofNX9DS9PYydN9tpD".to_string(), - ]; - - assert_eq!(names, a_names); - - let (proof_root_hash, proof_results, _) = query - .execute_with_proof_only_get_elements(&drive, None, None, platform_version) - .expect("we should be able to a proof"); - assert_eq!(root_hash, proof_root_hash); - assert_eq!(results, proof_results); -} + let platform_version = PlatformVersion::latest(); -#[cfg(feature = "server")] -#[test] -fn test_query_a_b_c_d_e_contract() { - let drive: Drive = setup_drive_with_initial_state_structure(None); + // Create a contract - let platform_version = PlatformVersion::latest(); + let block_info = BlockInfo::default(); + let owner_id = dpp::identifier::Identifier::new([2u8; 32]); - // Create a contract - - let block_info = BlockInfo::default(); - let owner_id = dpp::identifier::Identifier::new([2u8; 32]); - - 
let documents = platform_value!({ - "testDocument": { - "type": "object", - "properties": { - "a": { - "type": "integer", - "position": 0 - }, - "b": { - "type": "integer", - "position": 1 - }, - "c": { - "type": "integer", - "position": 2 - }, - "d": { - "type": "integer", - "position": 3 - }, - "e": { - "type": "integer", - "position": 4 - } - }, - "additionalProperties": false, - "indices": [ - { - "name": "abcde", - "properties": [ - { - "a": "asc" + let documents = platform_value!({ + "testDocument": { + "type": "object", + "properties": { + "a": { + "type": "integer", + "position": 0 }, - { - "b": "asc" + "b": { + "type": "integer", + "position": 1 }, - { - "c": "asc" + "c": { + "type": "integer", + "position": 2 }, - { - "d": "asc" + "d": { + "type": "integer", + "position": 3 }, - { - "e": "asc" + "e": { + "type": "integer", + "position": 4 } + }, + "additionalProperties": false, + "indices": [ + { + "name": "abcde", + "properties": [ + { + "a": "asc" + }, + { + "b": "asc" + }, + { + "c": "asc" + }, + { + "d": "asc" + }, + { + "e": "asc" + } + ] + }, ] - }, - ] - } - }); - - let factory = - DataContractFactory::new(platform_version.protocol_version).expect("should create factory"); - - let contract = factory - .create_with_value_config(owner_id, 0, documents, None, None) - .expect("data in fixture should be correct") - .data_contract_owned(); + } + }); - drive - .apply_contract( - &contract, - block_info, - true, - StorageFlags::optional_default_as_cow(), - None, - platform_version, - ) - .expect("should apply contract"); + let factory = DataContractFactory::new(platform_version.protocol_version) + .expect("should create factory"); - // Perform query + let contract = factory + .create_with_value_config(owner_id, 0, documents, None, None) + .expect("data in fixture should be correct") + .data_contract_owned(); - let document_type = "testDocument".to_string(); + drive + .apply_contract( + &contract, + block_info, + true, + 
StorageFlags::optional_default_as_cow(), + None, + platform_version, + ) + .expect("should apply contract"); - let query_json = json!({ - "where": [ - ["a","==",1], - ["b","==",2], - ["c","==",3], - ["d","in",[1,2]]], - "orderBy":[ - ["d","desc"], - ["e","asc"] - ] - }); + // Perform query - let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_json, None) - .expect("expected to serialize to cbor"); + let document_type = "testDocument".to_string(); - drive - .query_documents_cbor_from_contract( - &contract, - contract - .document_type_for_name(&document_type) - .expect("should have this document type"), - &query_cbor, - None, - None, - Some(platform_version.protocol_version), - ) - .expect("should perform query"); -} + let query_json = json!({ + "where": [ + ["a","==",1], + ["b","==",2], + ["c","==",3], + ["d","in",[1,2]]], + "orderBy":[ + ["d","desc"], + ["e","asc"] + ] + }); -#[cfg(feature = "server")] -#[test] -fn test_query_documents_by_created_at() { - let drive = setup_drive_with_initial_state_structure(None); + let query_cbor = cbor_serializer::serializable_value_to_cbor(&query_json, None) + .expect("expected to serialize to cbor"); - let platform_version = PlatformVersion::latest(); + drive + .query_documents_cbor_from_contract( + &contract, + contract + .document_type_for_name(&document_type) + .expect("should have this document type"), + &query_cbor, + None, + None, + Some(platform_version.protocol_version), + ) + .expect("should perform query"); + } - let contract_value = platform_value!({ - "$format_version": "0", - "id": "BZUodcFoFL6KvnonehrnMVggTvCe8W5MiRnZuqLb6M54", - "version": 1, - "ownerId": "GZVdTnLFAN2yE9rLeCHBDBCr7YQgmXJuoExkY347j7Z5", - "documentSchemas": { - "indexedDocument": { - "type": "object", - "indices": [ - {"name":"index1", "properties": [{"$ownerId":"asc"}, {"firstName":"desc"}], "unique":true}, - {"name":"index2", "properties": [{"$ownerId":"asc"}, {"lastName":"desc"}], "unique":true}, - {"name":"index3", 
"properties": [{"lastName":"asc"}]}, - {"name":"index4", "properties": [{"$createdAt":"asc"}, {"$updatedAt":"asc"}]}, - {"name":"index5", "properties": [{"$updatedAt":"asc"}]}, - {"name":"index6", "properties": [{"$createdAt":"asc"}]} - ], - "properties":{ - "firstName": { - "type": "string", - "maxLength": 63, - "position": 0 + #[cfg(feature = "server")] + #[test] + fn test_query_documents_by_created_at() { + let drive = setup_drive_with_initial_state_structure(None); + + let platform_version = PlatformVersion::latest(); + + let contract_value = platform_value!({ + "$format_version": "0", + "id": "BZUodcFoFL6KvnonehrnMVggTvCe8W5MiRnZuqLb6M54", + "version": 1, + "ownerId": "GZVdTnLFAN2yE9rLeCHBDBCr7YQgmXJuoExkY347j7Z5", + "documentSchemas": { + "indexedDocument": { + "type": "object", + "indices": [ + {"name":"index1", "properties": [{"$ownerId":"asc"}, {"firstName":"desc"}], "unique":true}, + {"name":"index2", "properties": [{"$ownerId":"asc"}, {"lastName":"desc"}], "unique":true}, + {"name":"index3", "properties": [{"lastName":"asc"}]}, + {"name":"index4", "properties": [{"$createdAt":"asc"}, {"$updatedAt":"asc"}]}, + {"name":"index5", "properties": [{"$updatedAt":"asc"}]}, + {"name":"index6", "properties": [{"$createdAt":"asc"}]} + ], + "properties":{ + "firstName": { + "type": "string", + "maxLength": 63, + "position": 0 + }, + "lastName": { + "type": "string", + "maxLength": 63, + "position": 1 + } }, - "lastName": { - "type": "string", - "maxLength": 63, - "position": 1 - } + "required": ["firstName", "$createdAt", "$updatedAt", "lastName"], + "additionalProperties": false, }, - "required": ["firstName", "$createdAt", "$updatedAt", "lastName"], - "additionalProperties": false, }, - }, - }); + }); - let contract = DataContract::from_value(contract_value, false, platform_version) - .expect("should create a contract from cbor"); + let contract = DataContract::from_value(contract_value, false, platform_version) + .expect("should create a contract from cbor"); - 
drive - .apply_contract( - &contract, - BlockInfo::default(), - true, - None, - None, - platform_version, - ) - .expect("should apply contract"); - - // Create document - - let created_at: TimestampMillis = 1647535750329; - - let document_value = platform_value!({ - "firstName": "myName", - "lastName": "lastName", - "$createdAt": created_at, - "$updatedAt": created_at, - }); - - let document = contract - .document_type_for_name("indexedDocument") - .expect("should have indexedDocument type") - .create_document_from_data( - document_value, - Identifier::random(), - random(), - random(), - random(), - platform_version, - ) - .expect("should create document"); + drive + .apply_contract( + &contract, + BlockInfo::default(), + true, + None, + None, + platform_version, + ) + .expect("should apply contract"); - let info = DocumentAndContractInfo { - owned_document_info: OwnedDocumentInfo { - document_info: DocumentInfo::DocumentOwnedInfo((document, None)), - owner_id: None, - }, - contract: &contract, - document_type: contract - .document_type_for_name("indexedDocument") - .expect("should have indexedDocument type"), - }; + // Create document - drive - .add_document_for_contract( - info, - true, - BlockInfo::default(), - true, - None, - platform_version, - None, - ) - .expect("should add document"); + let created_at: TimestampMillis = 1647535750329; - // Query document + let document_value = platform_value!({ + "firstName": "myName", + "lastName": "lastName", + "$createdAt": created_at, + "$updatedAt": created_at, + }); - let query_cbor = cbor!({ - "where" => [ - ["$createdAt", "==", created_at] - ], - }) - .expect("should create cbor"); + let document = contract + .document_type_for_name("indexedDocument") + .expect("should have indexedDocument type") + .create_document_from_data( + document_value, + Identifier::random(), + random(), + random(), + random(), + platform_version, + ) + .expect("should create document"); - let query_bytes = 
cbor_serializer::serializable_value_to_cbor(&query_cbor, None) - .expect("should serialize cbor value to bytes"); + let info = DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentInfo::DocumentOwnedInfo((document, None)), + owner_id: None, + }, + contract: &contract, + document_type: contract + .document_type_for_name("indexedDocument") + .expect("should have indexedDocument type"), + }; - let document_type = contract - .document_type_for_name("indexedDocument") - .expect("should get document type"); + drive + .add_document_for_contract( + info, + true, + BlockInfo::default(), + true, + None, + platform_version, + None, + ) + .expect("should add document"); - let query = DriveDocumentQuery::from_cbor( - &query_bytes, - &contract, - document_type, - &DriveConfig::default(), - ) - .expect("should create a query from cbor"); - - assert_eq!( - query.internal_clauses.equal_clauses.get("$createdAt"), - Some(&WhereClause { - field: "$createdAt".to_string(), - operator: WhereOperator::Equal, - value: Value::U64(created_at) + // Query document + + let query_cbor = cbor!({ + "where" => [ + ["$createdAt", "==", created_at] + ], }) - ); + .expect("should create cbor"); - let query_result = drive - .query_documents( - query, - None, - false, - None, - Some(platform_version.protocol_version), + let query_bytes = cbor_serializer::serializable_value_to_cbor(&query_cbor, None) + .expect("should serialize cbor value to bytes"); + + let document_type = contract + .document_type_for_name("indexedDocument") + .expect("should get document type"); + + let query = DriveDocumentQuery::from_cbor( + &query_bytes, + &contract, + document_type, + &DriveConfig::default(), ) - .expect("should query documents"); + .expect("should create a query from cbor"); + + assert_eq!( + query.internal_clauses.equal_clauses.get("$createdAt"), + Some(&WhereClause { + field: "$createdAt".to_string(), + operator: WhereOperator::Equal, + value: Value::U64(created_at) + }) 
+ ); - assert_eq!(query_result.documents().len(), 1); -} + let query_result = drive + .query_documents( + query, + None, + false, + None, + Some(platform_version.protocol_version), + ) + .expect("should query documents"); -#[cfg(feature = "server")] -#[test] -#[ignore] -fn pwd() { - let working_dir = std::env::current_dir().unwrap(); - println!("{}", working_dir.display()); + assert_eq!(query_result.documents().len(), 1); + } } diff --git a/packages/rs-drive/tests/query_tests_history.rs b/packages/rs-drive/tests/query_tests_history.rs index 1aa1b98fe48..2ffe58ce495 100644 --- a/packages/rs-drive/tests/query_tests_history.rs +++ b/packages/rs-drive/tests/query_tests_history.rs @@ -1666,8 +1666,8 @@ fn test_query_historical_latest_platform_version() { assert_eq!( root_hash.as_slice(), vec![ - 5, 227, 59, 163, 38, 58, 181, 91, 23, 56, 47, 52, 138, 63, 5, 54, 205, 249, 205, 225, - 78, 225, 195, 2, 104, 6, 11, 77, 56, 69, 113, 237, + 133, 44, 161, 89, 108, 253, 236, 16, 93, 4, 122, 187, 249, 226, 208, 148, 47, 201, 46, + 129, 167, 114, 63, 221, 133, 12, 1, 136, 135, 208, 226, 213 ] ); @@ -3032,8 +3032,8 @@ fn test_query_historical_latest_platform_version() { assert_eq!( root_hash.as_slice(), vec![ - 203, 160, 200, 71, 200, 156, 249, 93, 201, 35, 171, 6, 57, 176, 159, 104, 26, 253, 141, - 193, 153, 0, 212, 9, 207, 239, 250, 51, 68, 228, 210, 164 + 124, 248, 167, 74, 81, 40, 160, 183, 10, 34, 119, 221, 225, 7, 207, 42, 42, 205, 8, + 149, 201, 22, 165, 217, 125, 191, 152, 223, 66, 102, 9, 180 ] ); } diff --git a/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/mod.rs b/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/mod.rs index 237f7c75c0e..fae14c1ed5c 100644 --- a/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/mod.rs +++ b/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/mod.rs @@ -24,6 +24,7 @@ pub struct DataContractMethodVersions { pub validate_update: 
FeatureVersion, pub schema: FeatureVersion, pub validate_groups: FeatureVersion, + pub equal_ignoring_time_fields: FeatureVersion, } #[derive(Clone, Debug, Default)] diff --git a/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v1.rs b/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v1.rs index 467d3670338..c7060f812e7 100644 --- a/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v1.rs +++ b/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v1.rs @@ -20,6 +20,7 @@ pub const CONTRACT_VERSIONS_V1: DPPContractVersions = DPPContractVersions { validate_update: 0, schema: 0, validate_groups: 0, + equal_ignoring_time_fields: 0, }, document_type_versions: DocumentTypeVersions { index_versions: DocumentTypeIndexVersions { diff --git a/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v2.rs b/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v2.rs index 854a3dc331b..dfda3ca1299 100644 --- a/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v2.rs +++ b/packages/rs-platform-version/src/version/dpp_versions/dpp_contract_versions/v2.rs @@ -20,6 +20,7 @@ pub const CONTRACT_VERSIONS_V2: DPPContractVersions = DPPContractVersions { validate_update: 0, schema: 0, validate_groups: 0, + equal_ignoring_time_fields: 0, }, document_type_versions: DocumentTypeVersions { index_versions: DocumentTypeIndexVersions { diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_method_versions/v6.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_method_versions/v6.rs index 8e29725ed6e..66754054a71 100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_method_versions/v6.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_method_versions/v6.rs @@ -12,7 +12,7 @@ use 
crate::version::drive_abci_versions::drive_abci_method_versions::{ DriveAbciVotingMethodVersions, }; -// Introduced in Protocol version 8 for tokens +// Introduced in Protocol version 9 for tokens pub const DRIVE_ABCI_METHOD_VERSIONS_V6: DriveAbciMethodVersions = DriveAbciMethodVersions { engine: DriveAbciEngineMethodVersions { init_chain: 0, @@ -75,7 +75,8 @@ pub const DRIVE_ABCI_METHOD_VERSIONS_V6: DriveAbciMethodVersions = DriveAbciMeth add_distribute_storage_fee_to_epochs_operations: 0, }, fee_pool_outwards_distribution: DriveAbciFeePoolOutwardsDistributionMethodVersions { - add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations: 0, + // this changes to 1 and now stores additional info about the epoch + add_distribute_fees_from_oldest_unpaid_epoch_pool_to_proposers_operations: 1, // new add_epoch_pool_to_proposers_payout_operations: 0, find_oldest_epoch_needing_payment: 0, fetch_reward_shares_list_for_masternode: 0, diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/mod.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/mod.rs index 587366149e8..fb85eea357c 100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/mod.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/mod.rs @@ -101,7 +101,7 @@ pub struct DriveAbciDocumentsStateTransitionValidationVersions { pub token_mint_transition_structure_validation: FeatureVersion, pub token_burn_transition_structure_validation: FeatureVersion, pub token_transfer_transition_structure_validation: FeatureVersion, - pub token_issuance_transition_state_validation: FeatureVersion, + pub token_mint_transition_state_validation: FeatureVersion, pub token_burn_transition_state_validation: FeatureVersion, pub token_transfer_transition_state_validation: FeatureVersion, pub token_base_transition_structure_validation: 
FeatureVersion, @@ -117,6 +117,8 @@ pub struct DriveAbciDocumentsStateTransitionValidationVersions { pub token_config_update_transition_structure_validation: FeatureVersion, pub token_config_update_transition_state_validation: FeatureVersion, pub token_base_transition_group_action_validation: FeatureVersion, + pub token_claim_transition_structure_validation: FeatureVersion, + pub token_claim_transition_state_validation: FeatureVersion, } #[derive(Clone, Debug, Default)] diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v1.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v1.rs index 7b397d96426..e5e39e28c83 100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v1.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v1.rs @@ -132,7 +132,7 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V1: DriveAbciValidationVersions = token_mint_transition_structure_validation: 0, token_burn_transition_structure_validation: 0, token_transfer_transition_structure_validation: 0, - token_issuance_transition_state_validation: 0, + token_mint_transition_state_validation: 0, token_burn_transition_state_validation: 0, token_transfer_transition_state_validation: 0, token_base_transition_structure_validation: 0, @@ -148,6 +148,8 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V1: DriveAbciValidationVersions = token_config_update_transition_structure_validation: 0, token_config_update_transition_state_validation: 0, token_base_transition_group_action_validation: 0, + token_claim_transition_structure_validation: 0, + token_claim_transition_state_validation: 0, }, }, has_nonce_validation: 0, diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v2.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v2.rs index 
3df6d962f52..218f3707354 100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v2.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v2.rs @@ -132,7 +132,7 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V2: DriveAbciValidationVersions = token_mint_transition_structure_validation: 0, token_burn_transition_structure_validation: 0, token_transfer_transition_structure_validation: 0, - token_issuance_transition_state_validation: 0, + token_mint_transition_state_validation: 0, token_burn_transition_state_validation: 0, token_transfer_transition_state_validation: 0, token_base_transition_structure_validation: 0, @@ -148,6 +148,8 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V2: DriveAbciValidationVersions = token_config_update_transition_structure_validation: 0, token_config_update_transition_state_validation: 0, token_base_transition_group_action_validation: 0, + token_claim_transition_structure_validation: 0, + token_claim_transition_state_validation: 0, }, }, has_nonce_validation: 0, diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v3.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v3.rs index e3aadceb22d..04d0270979b 100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v3.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v3.rs @@ -132,7 +132,7 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V3: DriveAbciValidationVersions = token_mint_transition_structure_validation: 0, token_burn_transition_structure_validation: 0, token_transfer_transition_structure_validation: 0, - token_issuance_transition_state_validation: 0, + token_mint_transition_state_validation: 0, token_burn_transition_state_validation: 0, token_transfer_transition_state_validation: 0, 
token_base_transition_structure_validation: 0, @@ -148,6 +148,8 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V3: DriveAbciValidationVersions = token_config_update_transition_structure_validation: 0, token_config_update_transition_state_validation: 0, token_base_transition_group_action_validation: 0, + token_claim_transition_structure_validation: 0, + token_claim_transition_state_validation: 0, }, }, has_nonce_validation: 0, diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v4.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v4.rs index aa0ded10270..78fe27cdb58 100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v4.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v4.rs @@ -135,7 +135,7 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V4: DriveAbciValidationVersions = token_mint_transition_structure_validation: 0, token_burn_transition_structure_validation: 0, token_transfer_transition_structure_validation: 0, - token_issuance_transition_state_validation: 0, + token_mint_transition_state_validation: 0, token_burn_transition_state_validation: 0, token_transfer_transition_state_validation: 0, token_base_transition_structure_validation: 0, @@ -151,6 +151,8 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V4: DriveAbciValidationVersions = token_config_update_transition_structure_validation: 0, token_config_update_transition_state_validation: 0, token_base_transition_group_action_validation: 0, + token_claim_transition_structure_validation: 0, + token_claim_transition_state_validation: 0, }, }, has_nonce_validation: 1, // <---- changed this diff --git a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v5.rs b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v5.rs index 0c3bcfea280..fcc09a5d892 
100644 --- a/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v5.rs +++ b/packages/rs-platform-version/src/version/drive_abci_versions/drive_abci_validation_versions/v5.rs @@ -136,7 +136,7 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V5: DriveAbciValidationVersions = token_mint_transition_structure_validation: 0, token_burn_transition_structure_validation: 0, token_transfer_transition_structure_validation: 0, - token_issuance_transition_state_validation: 0, + token_mint_transition_state_validation: 0, token_burn_transition_state_validation: 0, token_transfer_transition_state_validation: 0, token_base_transition_structure_validation: 0, @@ -152,6 +152,8 @@ pub const DRIVE_ABCI_VALIDATION_VERSIONS_V5: DriveAbciValidationVersions = token_config_update_transition_structure_validation: 0, token_config_update_transition_state_validation: 0, token_base_transition_group_action_validation: 0, + token_claim_transition_structure_validation: 0, + token_claim_transition_state_validation: 0, }, }, has_nonce_validation: 1, diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/mod.rs b/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/mod.rs index 4e60e2c417d..80f39dabc07 100644 --- a/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/mod.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/mod.rs @@ -29,6 +29,9 @@ pub struct DriveCreditPoolEpochsMethodVersions { pub add_update_pending_epoch_refunds_operations: FeatureVersion, pub is_epochs_proposers_tree_empty: FeatureVersion, pub add_epoch_processing_credits_for_distribution_operation: FeatureVersion, + pub add_epoch_final_info_operation: FeatureVersion, + pub get_epoch_protocol_version: FeatureVersion, + pub get_finalized_epoch_infos: FeatureVersion, } #[derive(Clone, Debug, Default)] diff --git 
a/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/v1.rs b/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/v1.rs index 7c2252edfb8..836fed9e770 100644 --- a/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/v1.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_credit_pool_method_versions/v1.rs @@ -26,6 +26,9 @@ pub const CREDIT_POOL_METHOD_VERSIONS_V1: DriveCreditPoolMethodVersions = add_update_pending_epoch_refunds_operations: 0, is_epochs_proposers_tree_empty: 0, add_epoch_processing_credits_for_distribution_operation: 0, + add_epoch_final_info_operation: 0, + get_epoch_protocol_version: 0, + get_finalized_epoch_infos: 0, }, pending_epoch_refunds: DriveCreditPoolPendingEpochRefundsMethodVersions { add_delete_pending_epoch_refunds_except_specified: 0, diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/mod.rs b/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/mod.rs index 7528ec11a40..1a2526f4fe7 100644 --- a/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/mod.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/mod.rs @@ -72,6 +72,7 @@ pub struct DriveIdentityCostEstimationMethodVersions { pub for_token_identity_infos: FeatureVersion, pub for_token_pre_programmed_distribution: FeatureVersion, pub for_root_token_ms_interval_distribution: FeatureVersion, + pub for_token_perpetual_distribution: FeatureVersion, } #[derive(Clone, Debug, Default)] diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/v1.rs b/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/v1.rs index 3db719ae7fa..beb79c65c18 100644 --- 
a/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/v1.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_identity_method_versions/v1.rs @@ -131,6 +131,7 @@ pub const DRIVE_IDENTITY_METHOD_VERSIONS_V1: DriveIdentityMethodVersions = for_root_key_reference_tree: 0, for_update_revision: 0, for_token_identity_infos: 0, + for_token_perpetual_distribution: 0, for_token_pre_programmed_distribution: 0, for_root_token_ms_interval_distribution: 0, }, diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/mod.rs b/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/mod.rs index 23c81d18ce4..1e38cbb87f3 100644 --- a/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/mod.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/mod.rs @@ -38,6 +38,7 @@ pub struct DriveStateTransitionActionConvertToHighLevelOperationsMethodVersions pub token_emergency_action_transition: FeatureVersion, pub token_destroy_frozen_funds_transition: FeatureVersion, pub token_config_update_transition: FeatureVersion, + pub token_claim_transition: FeatureVersion, } #[derive(Clone, Debug, Default)] diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/v1.rs b/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/v1.rs index 5db70c5c197..5ca5fd49951 100644 --- a/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/v1.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_state_transition_method_versions/v1.rs @@ -40,5 +40,6 @@ pub const DRIVE_STATE_TRANSITION_METHOD_VERSIONS_V1: DriveStateTransitionMethodV token_emergency_action_transition: 0, token_destroy_frozen_funds_transition: 0, token_config_update_transition: 
0, + token_claim_transition: 0, }, }; diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/mod.rs b/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/mod.rs index 79ae0f0348a..0a94075e0a3 100644 --- a/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/mod.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/mod.rs @@ -15,6 +15,8 @@ pub struct DriveTokenMethodVersions { pub struct DriveTokenDistributionMethodVersions { pub add_perpetual_distribution: FeatureVersion, pub add_pre_programmed_distributions: FeatureVersion, + pub mark_perpetual_release_as_distributed: FeatureVersion, + pub mark_pre_programmed_release_as_distributed: FeatureVersion, } #[derive(Clone, Debug, Default)] @@ -30,6 +32,7 @@ pub struct DriveTokenFetchMethodVersions { pub token_total_supply: FeatureVersion, pub token_total_aggregated_identity_balances: FeatureVersion, pub pre_programmed_distributions: FeatureVersion, + pub perpetual_distribution_last_paid_time: FeatureVersion, } #[derive(Clone, Debug, Default)] @@ -50,6 +53,7 @@ pub struct DriveTokenUpdateMethodVersions { pub create_token_trees: FeatureVersion, pub burn: FeatureVersion, pub mint: FeatureVersion, + pub mint_many: FeatureVersion, pub transfer: FeatureVersion, pub add_to_token_total_supply: FeatureVersion, pub remove_from_token_total_supply: FeatureVersion, @@ -59,4 +63,5 @@ pub struct DriveTokenUpdateMethodVersions { pub freeze: FeatureVersion, pub unfreeze: FeatureVersion, pub apply_status: FeatureVersion, + pub perpetual_distribution_next_event_for_identity_id: FeatureVersion, } diff --git a/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/v1.rs b/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/v1.rs index d9c6f85d2f3..8ef1820b92d 100644 --- 
a/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/v1.rs +++ b/packages/rs-platform-version/src/version/drive_versions/drive_token_method_versions/v1.rs @@ -16,6 +16,7 @@ pub const DRIVE_TOKEN_METHOD_VERSIONS_V1: DriveTokenMethodVersions = DriveTokenM token_total_supply: 0, token_total_aggregated_identity_balances: 0, pre_programmed_distributions: 0, + perpetual_distribution_last_paid_time: 0, }, prove: DriveTokenProveMethodVersions { identity_token_balance: 0, @@ -32,6 +33,7 @@ pub const DRIVE_TOKEN_METHOD_VERSIONS_V1: DriveTokenMethodVersions = DriveTokenM create_token_trees: 0, burn: 0, mint: 0, + mint_many: 0, transfer: 0, add_to_token_total_supply: 0, remove_from_token_total_supply: 0, @@ -41,10 +43,13 @@ pub const DRIVE_TOKEN_METHOD_VERSIONS_V1: DriveTokenMethodVersions = DriveTokenM freeze: 0, unfreeze: 0, apply_status: 0, + perpetual_distribution_next_event_for_identity_id: 0, }, calculate_total_tokens_balance: 0, distribution: DriveTokenDistributionMethodVersions { add_perpetual_distribution: 0, add_pre_programmed_distributions: 0, + mark_perpetual_release_as_distributed: 0, + mark_pre_programmed_release_as_distributed: 0, }, }; diff --git a/packages/rs-sdk/src/mock/provider.rs b/packages/rs-sdk/src/mock/provider.rs index 879c4137ebe..9f5c2354310 100644 --- a/packages/rs-sdk/src/mock/provider.rs +++ b/packages/rs-sdk/src/mock/provider.rs @@ -6,6 +6,7 @@ use crate::sync::block_on; use crate::{Error, Sdk}; use arc_swap::ArcSwapAny; use dpp::prelude::{CoreBlockHeight, DataContract, Identifier}; +use dpp::version::PlatformVersion; use drive_proof_verifier::error::ContextProviderError; use drive_proof_verifier::ContextProvider; use std::hash::Hash; @@ -180,6 +181,7 @@ impl ContextProvider for GrpcContextProvider { fn get_data_contract( &self, data_contract_id: &Identifier, + _platform_version: &PlatformVersion, ) -> Result>, ContextProviderError> { if let Some(contract) = self.data_contracts_cache.get(data_contract_id) { 
return Ok(Some(contract)); diff --git a/packages/rs-sdk/src/platform/transition/broadcast.rs b/packages/rs-sdk/src/platform/transition/broadcast.rs index 72957948b28..406316b6cb3 100644 --- a/packages/rs-sdk/src/platform/transition/broadcast.rs +++ b/packages/rs-sdk/src/platform/transition/broadcast.rs @@ -132,7 +132,7 @@ impl BroadcastStateTransition for StateTransition { self, &block_info, proof.grovedb_proof.as_slice(), - &context_provider.as_contract_lookup_fn(), + &context_provider.as_contract_lookup_fn(sdk.version()), sdk.version(), ) .wrap_to_execution_result(&response)? diff --git a/packages/token-history-contract/schema/v1/token-history-contract-documents.json b/packages/token-history-contract/schema/v1/token-history-contract-documents.json index f8d1af53b03..c86a4d930d5 100644 --- a/packages/token-history-contract/schema/v1/token-history-contract-documents.json +++ b/packages/token-history-contract/schema/v1/token-history-contract-documents.json @@ -514,6 +514,110 @@ ], "additionalProperties": false }, + "claim": { + "type": "object", + "documentsMutable": false, + "canBeDeleted": false, + "creationRestrictionMode": 2, + "indices": [ + { + "name": "byDate", + "properties": [ + { + "tokenId": "asc" + }, + { + "$createdAt": "asc" + } + ] + }, + { + "name": "byRecipient", + "properties": [ + { + "tokenId": "asc" + }, + { + "recipientType": "asc" + }, + { + "recipientId": "asc" + }, + { + "$createdAt": "asc" + } + ] + }, + { + "name": "byDistributionType", + "properties": [ + { + "tokenId": "asc" + }, + { + "distributionType": "asc" + }, + { + "$createdAt": "asc" + } + ] + } + ], + "properties": { + "tokenId": { + "type": "array", + "byteArray": true, + "minItems": 32, + "maxItems": 32, + "description": "The token ID", + "position": 0, + "contentMediaType": "application/x.dash.dpp.identifier" + }, + "recipientType": { + "type": "integer", + "minimum": 0, + "maximum": 2, + "description": "Recipient type: 0 = ContractOwner, 1 = Identity, 2 = 
EvonodesByParticipation", + "position": 1 + }, + "recipientId": { + "type": "array", + "byteArray": true, + "minItems": 32, + "maxItems": 32, + "description": "An optional identity ID for direct recipient distributions", + "contentMediaType": "application/x.dash.dpp.identifier", + "position": 2 + }, + "distributionType": { + "type": "integer", + "minimum": 0, + "description": "The type of distribution (0: PreProgrammed, 1: Perpetual)", + "position": 3 + }, + "amount": { + "type": "integer", + "minimum": 0, + "description": "The amount of tokens released", + "position": 4 + }, + "note": { + "type": "string", + "maxLength": 2048, + "description": "An optional explanation for this release", + "position": 5 + } + }, + "required": [ + "tokenId", + "recipientType", + "distributionType", + "amount", + "$createdAt", + "$createdAtBlockHeight" + ], + "additionalProperties": false + }, "emergencyAction": { "type": "object", "documentsMutable": false, diff --git a/packages/wasm-dpp/src/data_contract/data_contract.rs b/packages/wasm-dpp/src/data_contract/data_contract.rs index 7e390d70077..63a9e73aabb 100644 --- a/packages/wasm-dpp/src/data_contract/data_contract.rs +++ b/packages/wasm-dpp/src/data_contract/data_contract.rs @@ -325,25 +325,6 @@ impl DataContractWasm { self.identity_nonce.unwrap_or_default() } - #[wasm_bindgen(js_name=getMetadata)] - pub fn metadata(&self) -> Option { - self.inner.metadata().cloned().map(Into::into) - } - - #[wasm_bindgen(js_name=setMetadata)] - pub fn set_metadata(&mut self, metadata: JsValue) -> Result<(), JsValue> { - let metadata = if !metadata.is_falsy() { - let metadata = metadata.to_wasm::("Metadata")?; - Some(metadata.to_owned().into()) - } else { - None - }; - - self.inner.set_metadata(metadata); - - Ok(()) - } - #[wasm_bindgen(js_name=toObject)] pub fn to_object(&self) -> Result { let platform_version = PlatformVersion::first(); diff --git a/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/claim.rs 
b/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/claim.rs new file mode 100644 index 00000000000..490274497d6 --- /dev/null +++ b/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/claim.rs @@ -0,0 +1,12 @@ +use dpp::state_transition::batch_transition::TokenClaimTransition; +use wasm_bindgen::prelude::wasm_bindgen; + +#[wasm_bindgen(js_name=TokenClaimTransition)] +#[derive(Debug, Clone)] +pub struct TokenClaimTransitionWasm(TokenClaimTransition); + +impl From for TokenClaimTransitionWasm { + fn from(value: TokenClaimTransition) -> Self { + Self(value) + } +} diff --git a/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/mod.rs b/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/mod.rs index 8a0fb050c00..746c9e44c50 100644 --- a/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/mod.rs +++ b/packages/wasm-dpp/src/document/state_transition/batch_transition/token_transition/mod.rs @@ -1,4 +1,5 @@ mod burn; +mod claim; mod config; mod destroy; mod emergency_action; @@ -8,6 +9,7 @@ mod transfer; mod unfreeze; use crate::batch_transition::token_transition::burn::TokenBurnTransitionWasm; +use crate::batch_transition::token_transition::claim::TokenClaimTransitionWasm; use crate::batch_transition::token_transition::config::TokenConfigUpdateTransitionWasm; use crate::batch_transition::token_transition::destroy::TokenDestroyFrozenFundsTransitionWasm; use crate::batch_transition::token_transition::emergency_action::TokenEmergencyActionTransitionWasm; @@ -32,6 +34,7 @@ pub enum TokenTransitionType { Freeze, Unfreeze, DestroyFrozenFunds, + Claim, EmergencyAction, ConfigUpdate, } @@ -47,6 +50,7 @@ impl From<&TokenTransition> for TokenTransitionType { TokenTransition::DestroyFrozenFunds(_) => TokenTransitionType::DestroyFrozenFunds, TokenTransition::EmergencyAction(_) => TokenTransitionType::EmergencyAction, 
TokenTransition::ConfigUpdate(_) => TokenTransitionType::ConfigUpdate, + TokenTransition::Claim(_) => TokenTransitionType::Claim, } } } @@ -112,6 +116,7 @@ impl TokenTransitionWasm { TokenTransition::ConfigUpdate(config_update) => { TokenConfigUpdateTransitionWasm::from(config_update.clone()).into() } + TokenTransition::Claim(claim) => TokenClaimTransitionWasm::from(claim.clone()).into(), } } } diff --git a/packages/wasm-dpp/src/errors/consensus/consensus_error.rs b/packages/wasm-dpp/src/errors/consensus/consensus_error.rs index a063f17fe0c..de6c3a88c2f 100644 --- a/packages/wasm-dpp/src/errors/consensus/consensus_error.rs +++ b/packages/wasm-dpp/src/errors/consensus/consensus_error.rs @@ -61,12 +61,12 @@ use dpp::consensus::state::data_trigger::DataTriggerError::{ DataTriggerConditionError, DataTriggerExecutionError, DataTriggerInvalidResultError, }; use wasm_bindgen::{JsError, JsValue}; -use dpp::consensus::basic::data_contract::{ContestedUniqueIndexOnMutableDocumentTypeError, ContestedUniqueIndexWithUniqueIndexError, DataContractTokenConfigurationUpdateError, GroupExceedsMaxMembersError, GroupMemberHasPowerOfZeroError, GroupMemberHasPowerOverLimitError, GroupNonUnilateralMemberPowerHasLessThanRequiredPowerError, GroupPositionDoesNotExistError, GroupTotalPowerLessThanRequiredError, InvalidDocumentTypeRequiredSecurityLevelError, InvalidTokenBaseSupplyError, NonContiguousContractGroupPositionsError, NonContiguousContractTokenPositionsError, UnknownDocumentCreationRestrictionModeError, UnknownSecurityLevelError, UnknownStorageKeyRequirementsError, UnknownTradeModeError, UnknownTransferableTypeError}; +use dpp::consensus::basic::data_contract::{ContestedUniqueIndexOnMutableDocumentTypeError, ContestedUniqueIndexWithUniqueIndexError, DataContractTokenConfigurationUpdateError, GroupExceedsMaxMembersError, GroupMemberHasPowerOfZeroError, GroupMemberHasPowerOverLimitError, GroupNonUnilateralMemberPowerHasLessThanRequiredPowerError, GroupPositionDoesNotExistError, 
GroupTotalPowerLessThanRequiredError, InvalidDocumentTypeRequiredSecurityLevelError, InvalidTokenBaseSupplyError, InvalidTokenDistributionFunctionDivideByZeroError, InvalidTokenDistributionFunctionIncoherenceError, InvalidTokenDistributionFunctionInvalidParameterError, InvalidTokenDistributionFunctionInvalidParameterTupleError, NonContiguousContractGroupPositionsError, NonContiguousContractTokenPositionsError, UnknownDocumentCreationRestrictionModeError, UnknownSecurityLevelError, UnknownStorageKeyRequirementsError, UnknownTradeModeError, UnknownTransferableTypeError}; use dpp::consensus::basic::document::{ContestedDocumentsTemporarilyNotAllowedError, DocumentCreationNotAllowedError, DocumentFieldMaxSizeExceededError, MaxDocumentsTransitionsExceededError, MissingPositionsInDocumentTypePropertiesError}; use dpp::consensus::basic::group::GroupActionNotAllowedOnTransitionError; use dpp::consensus::basic::identity::{DataContractBoundsNotPresentError, DisablingKeyIdAlsoBeingAddedInSameTransitionError, InvalidIdentityCreditWithdrawalTransitionAmountError, InvalidIdentityUpdateTransitionDisableKeysError, InvalidIdentityUpdateTransitionEmptyError, TooManyMasterPublicKeyError, WithdrawalOutputScriptNotAllowedWhenSigningWithOwnerKeyError}; use dpp::consensus::basic::overflow_error::OverflowError; -use dpp::consensus::basic::token::{ChoosingTokenMintRecipientNotAllowedError, ContractHasNoTokensError, DestinationIdentityForTokenMintingNotSetError, InvalidActionIdError, InvalidTokenIdError, InvalidTokenPositionError, TokenTransferToOurselfError}; +use dpp::consensus::basic::token::{ChoosingTokenMintRecipientNotAllowedError, ContractHasNoTokensError, DestinationIdentityForTokenMintingNotSetError, InvalidActionIdError, InvalidTokenAmountError, InvalidTokenConfigUpdateNoChangeError, InvalidTokenIdError, InvalidTokenNoteTooBigError, InvalidTokenPositionError, TokenTransferToOurselfError}; use 
dpp::consensus::state::data_contract::data_contract_update_action_not_allowed_error::DataContractUpdateActionNotAllowedError; use dpp::consensus::state::data_contract::document_type_update_error::DocumentTypeUpdateError; use dpp::consensus::state::document::document_contest_currently_locked_error::DocumentContestCurrentlyLockedError; @@ -84,7 +84,7 @@ use dpp::consensus::state::identity::no_transfer_key_for_core_withdrawal_availab use dpp::consensus::state::identity::RecipientIdentityDoesNotExistError; use dpp::consensus::state::prefunded_specialized_balances::prefunded_specialized_balance_insufficient_error::PrefundedSpecializedBalanceInsufficientError; use dpp::consensus::state::prefunded_specialized_balances::prefunded_specialized_balance_not_found_error::PrefundedSpecializedBalanceNotFoundError; -use dpp::consensus::state::token::{IdentityDoesNotHaveEnoughTokenBalanceError, IdentityTokenAccountNotFrozenError, IdentityTokenAccountFrozenError, TokenIsPausedError, IdentityTokenAccountAlreadyFrozenError, UnauthorizedTokenActionError, TokenSettingMaxSupplyToLessThanCurrentSupplyError, TokenMintPastMaxSupplyError, NewTokensDestinationIdentityDoesNotExistError, NewAuthorizedActionTakerIdentityDoesNotExistError, NewAuthorizedActionTakerGroupDoesNotExistError, NewAuthorizedActionTakerMainGroupNotSetError, InvalidGroupPositionError, TokenAlreadyPausedError, TokenNotPausedError}; +use dpp::consensus::state::token::{IdentityDoesNotHaveEnoughTokenBalanceError, IdentityTokenAccountNotFrozenError, IdentityTokenAccountFrozenError, TokenIsPausedError, IdentityTokenAccountAlreadyFrozenError, UnauthorizedTokenActionError, TokenSettingMaxSupplyToLessThanCurrentSupplyError, TokenMintPastMaxSupplyError, NewTokensDestinationIdentityDoesNotExistError, NewAuthorizedActionTakerIdentityDoesNotExistError, NewAuthorizedActionTakerGroupDoesNotExistError, NewAuthorizedActionTakerMainGroupNotSetError, InvalidGroupPositionError, TokenAlreadyPausedError, TokenNotPausedError, 
InvalidTokenClaimPropertyMismatch}; use dpp::consensus::state::voting::masternode_incorrect_voter_identity_id_error::MasternodeIncorrectVoterIdentityIdError; use dpp::consensus::state::voting::masternode_incorrect_voting_address_error::MasternodeIncorrectVotingAddressError; use dpp::consensus::state::voting::masternode_not_found_error::MasternodeNotFoundError; @@ -375,6 +375,9 @@ pub fn from_state_error(state_error: &StateError) -> JsValue { StateError::InvalidGroupPositionError(e) => { generic_consensus_error!(InvalidGroupPositionError, e).into() } + StateError::InvalidTokenClaimPropertyMismatch(e) => { + generic_consensus_error!(InvalidTokenClaimPropertyMismatch, e).into() + } } } @@ -699,6 +702,32 @@ fn from_basic_error(basic_error: &BasicError) -> JsValue { ) .into() } + BasicError::InvalidTokenAmountError(e) => { + generic_consensus_error!(InvalidTokenAmountError, e).into() + } + BasicError::InvalidTokenConfigUpdateNoChangeError(e) => { + generic_consensus_error!(InvalidTokenConfigUpdateNoChangeError, e).into() + } + BasicError::InvalidTokenDistributionFunctionDivideByZeroError(e) => { + generic_consensus_error!(InvalidTokenDistributionFunctionDivideByZeroError, e).into() + } + BasicError::InvalidTokenDistributionFunctionInvalidParameterError(e) => { + generic_consensus_error!(InvalidTokenDistributionFunctionInvalidParameterError, e) + .into() + } + BasicError::InvalidTokenDistributionFunctionInvalidParameterTupleError(e) => { + generic_consensus_error!( + InvalidTokenDistributionFunctionInvalidParameterTupleError, + e + ) + .into() + } + BasicError::InvalidTokenDistributionFunctionIncoherenceError(e) => { + generic_consensus_error!(InvalidTokenDistributionFunctionIncoherenceError, e).into() + } + BasicError::InvalidTokenNoteTooBigError(e) => { + generic_consensus_error!(InvalidTokenNoteTooBigError, e).into() + } } } diff --git a/packages/wasm-dpp/test/unit/dataContract/DataContract.spec.js b/packages/wasm-dpp/test/unit/dataContract/DataContract.spec.js index 
9acee8bba5e..86ba6cf1354 100644 --- a/packages/wasm-dpp/test/unit/dataContract/DataContract.spec.js +++ b/packages/wasm-dpp/test/unit/dataContract/DataContract.spec.js @@ -16,11 +16,10 @@ describe('DataContract', () => { let DataContract; let Identifier; - let Metadata; before(async () => { ({ - DataContract, Identifier, Metadata, + DataContract, Identifier, } = await loadWasmDpp()); }); @@ -298,17 +297,6 @@ describe('DataContract', () => { }); }); - describe('#setMetadata', () => { - it('should set metadata', () => { - const otherMetadata = new Metadata(BigInt(43), 1, BigInt(100), 2); - const otherMetadataToObject = otherMetadata.toObject(); - - dataContract.setMetadata(otherMetadata); - - expect(dataContract.getMetadata().toObject()).to.deep.equal(otherMetadataToObject); - }); - }); - describe.skip('#setConfig', () => { it('should set config', () => { const config = {