diff --git a/package.json b/package.json index 1d2c4fcf..39ad5959 100644 --- a/package.json +++ b/package.json @@ -44,6 +44,7 @@ "@verida/did-document": "^2.0.0-rc4", "@verida/encryption-utils": "^2.0.0-rc1", "aws-serverless-express": "^3.4.0", + "axios": "^1.2.1", "cors": "^2.8.5", "did-resolver": "^3.1.0", "dotenv": "^8.2.0", @@ -65,7 +66,6 @@ "@babel/preset-env": "^7.20.2", "@verida/account-node": "^2.0.0-rc4", "@verida/client-ts": "^2.0.0-rc4", - "axios": "^0.27.2", "claudia": "^5.14.1", "ethers": "^5.7.2", "mocha": "^7.0.0", diff --git a/sample.env b/sample.env index 410c43f5..0acf3b05 100644 --- a/sample.env +++ b/sample.env @@ -1,19 +1,29 @@ DID_RPC_URL= DID_NETWORK=testnet DID_CACHE_DURATION=3600 +# Admin username and password (for system operations) +# MUST be set to something random DB_USER="admin" DB_PASS="admin" +# Replication username and password (for replicating data to other nodes) +# MUST be set to something random +# MUST not change once the node is operational +DB_REPLICATION_USER="" +DB_REPLICATION_PASS="" # Internal hostname (Used internally by the storage node for accessing CouchDb). DB_PROTOCOL_INTERNAL="http" -DB_HOST_INTERNAL="192.168.68.117" +DB_HOST_INTERNAL="localhost" DB_PORT_INTERNAL=5984 # External hostname (returned to network users to access CouchDb) DB_PROTOCOL_EXTERNAL="http" -DB_HOST_EXTERNAL="192.168.68.117" +DB_HOST_EXTERNAL="localhost" DB_PORT_EXTERNAL=5984 +# The public URI of this storage node server (Will match what is stored in DID Documents) +ENDPOINT_URI="http://localhost:5000" + DB_REJECT_UNAUTHORIZED_SSL=true ACCESS_JWT_SIGN_PK=insert-random-access-symmetric-key # 5 Minutes @@ -22,7 +32,6 @@ REFRESH_JWT_SIGN_PK=insert-random-refresh-symmetric-key # 30 Days REFRESH_TOKEN_EXPIRY=2592000 DB_REFRESH_TOKENS=verida_refresh_tokens -DB_DB_INFO=verida_db_info # How often garbage collection runs (1=100%, 0.5 = 50%) GC_PERCENT=0.1 # Verida Private Key as hex string (used to sign responses). Including leading 0x. 
@@ -36,5 +45,6 @@ MAX_USERS=10000 DB_PUBLIC_USER=784c2n780c9cn0789 DB_PUBLIC_PASS=784c2n780c9cn0789 DB_DIDS=verida_dids +DB_REPLICATER_CREDS=verida_replicater_creds PORT=5151 diff --git a/src/components/authManager.js b/src/components/authManager.js index e69868e7..6ed30ee9 100644 --- a/src/components/authManager.js +++ b/src/components/authManager.js @@ -7,6 +7,8 @@ import { DIDClient } from '@verida/did-client' import EncryptionUtils from '@verida/encryption-utils'; import Utils from './utils.js'; import Db from './db.js'; +import dbManager from './dbManager.js'; +import Axios from 'axios' dotenv.config(); @@ -80,6 +82,26 @@ class AuthManager { } async verifySignedConsentMessage(did, contextName, signature, consentMessage) { + // Verify the signature signed the correct string + try { + const didDocument = await this.getDidDocument(did) + const result = didDocument.verifySig(consentMessage, signature) + + if (!result) { + console.info('Invalid signature when verifying signed consent message') + // Invalid signature + return false + } + + return true + } catch (err) { + // Likely unable to resolve DID or invalid signature + console.info(`Unable to resolve DID or invalid signature: ${err.message}`) + return false + } + } + + async getDidDocument(did) { // Verify the signature signed the correct string const cacheKey = did @@ -97,28 +119,16 @@ class AuthManager { } didDocument = await didClient.get(did) - - // @todo: check if the doc was auto-generated or actually - // stored on chain? 
if not on chain, don't cache if (didDocument) { const { DID_CACHE_DURATION } = process.env mcache.put(cacheKey, didDocument, DID_CACHE_DURATION * 1000) } } - const result = didDocument.verifySig(consentMessage, signature) - - if (!result) { - console.warning('Invalid signature when verifying signed consent message') - // Invalid signature - return false - } - - return true + return didDocument } catch (err) { - // @todo: Log error // Likely unable to resolve DID or invalid signature - console.warning(`Unable to resolve DID or invalid signature: ${err.message}`) + console.info(`Unable to resolve DID`) return false } } @@ -381,6 +391,50 @@ class AuthManager { } } + try { + await couch.db.create(process.env.DB_REPLICATER_CREDS) + } catch (err) { + if (err.message.match(/already exists/)) { + // Database already exists + } else { + console.error(err) + throw err + } + } + + // Create replication user with access to all databases + try { + const userDb = couch.db.use('_users') + const username = process.env.DB_REPLICATION_USER + const password = process.env.DB_REPLICATION_PASS + const id = `org.couchdb.user:${username}` + const userRow = { + _id: id, + name: username, + password, + type: "user", + roles: ['replicater-local'] + } + + await userDb.insert(userRow, userRow._id) + + } catch (err) { + if (err.error != 'conflict') { + throw err + } + } + + try { + await couch.db.create('_replicator') + } catch (err) { + if (err.message.match(/already exists/)) { + // Database already exists + } else { + console.error(err) + throw err + } + } + const tokenDb = couch.db.use(process.env.DB_REFRESH_TOKENS); const deviceIndex = { @@ -397,7 +451,156 @@ class AuthManager { await tokenDb.createIndex(expiryIndex); } - // @todo: garbage collection + async ensureReplicationCredentials(endpointUri, password, replicaterRole) { + const username = Utils.generateReplicaterUsername(endpointUri) + const id = `org.couchdb.user:${username}` + + const couch = Db.getCouch('internal'); + const 
usersDb = await couch.db.use('_users') + let user + try { + user = await usersDb.get(id) + + let userRequiresUpdate = false + if (user.roles.indexOf(replicaterRole) == -1) { + //console.log(`User exists, but needs the replicatorRole added (${replicaterRole})`) + user.roles.push(replicaterRole) + userRequiresUpdate = true + } + + // User exists, check if we need to update the password + if (password) { + user.password = password + userRequiresUpdate = true + //console.log(`User exists and password needs updating`) + } + + if (userRequiresUpdate) { + // User exists and we need to update the password or roles + //console.log(`User exists, updating password and / or roles`) + + try { + await dbManager._insertOrUpdate(usersDb, user, user._id) + return "updated" + } catch (err) { + throw new Error(`Unable to update password: ${err.message}`) + } + } else { + // Nothing needed to change, so respond indicating the user exists + return "exists" + } + } catch (err) { + if (err.error !== 'not_found') { + throw err + } + + // Need to create the user + try { + //console.log('Replication user didnt exist, so creating') + await dbManager._insertOrUpdate(usersDb, { + _id: id, + name: username, + password, + type: "user", + roles: [replicaterRole] + }, id) + + return "created" + } catch (err) { + throw new Error(`Unable to create replication user: ${err.message}`) + } + } + } + + async fetchReplicaterCredentials(endpointUri, did, contextName) { + // Check process.env.DB_REPLICATER_CREDS for existing credentials + const couch = Db.getCouch('internal'); + const replicaterCredsDb = await couch.db.use(process.env.DB_REPLICATER_CREDS) + const replicaterHash = Utils.generateReplicatorHash(endpointUri, did, contextName) + + //console.log(`${Utils.serverUri()}: Fetching credentials for ${endpointUri}`) + + let creds + try { + creds = await replicaterCredsDb.get(replicaterHash) + //console.log(`${Utils.serverUri()}: Located credentials for ${endpointUri}`) + } catch (err) { + // If 
credentials aren't found, that's okay we will create them below + if (err.error != 'not_found') { + throw err + } + } + + if (!creds) { + //console.log(`${Utils.serverUri()}: No credentials found for ${endpointUri}... creating.`) + const timestampMinutes = Math.floor(Date.now() / 1000 / 60) + + // Generate a random password + const secretKeyBytes = EncryptionUtils.randomKey(32) + const password = Buffer.from(secretKeyBytes).toString('hex') + + const requestBody = { + did, + contextName, + endpointUri: Utils.serverUri(), + timestampMinutes, + password + } + + const privateKeyBytes = new Uint8Array(Buffer.from(process.env.VDA_PRIVATE_KEY.substring(2), 'hex')) + const signature = EncryptionUtils.signData(requestBody, privateKeyBytes) + requestBody.signature = signature + + // Fetch credentials from the endpointUri + //console.log(`${Utils.serverUri()}: Requesting the creation of credentials for ${endpointUri}`) + try { + await Axios.post(`${endpointUri}/auth/replicationCreds`, requestBody) + //console.log(`${Utils.serverUri()}: Credentials generated for ${endpointUri}`) + } catch (err) { + if (err.response) { + throw Error(`Unable to obtain credentials from ${endpointUri} (${err.response.data.message})`) + } + + throw err + } + + let couchUri + try { + const statusResponse = await Axios.get(`${endpointUri}/status`) + couchUri = statusResponse.data.results.couchUri + //console.log(`${Utils.serverUri()}: Status fetched ${endpointUri} with CouchURI: ${couchUri}`) + } catch (err) { + if (err.response) { + throw Error(`Unable to obtain credentials from ${endpointUri} (${err.response.data.message})`) + } + + throw err + } + + creds = { + _id: replicaterHash, + // Use this server username + username: Utils.generateReplicaterUsername(Utils.serverUri()), + password, + couchUri + } + + try { + await dbManager._insertOrUpdate(replicaterCredsDb, creds, creds._id) + //console.log(`${Utils.serverUri()}: Credentials saved for ${endpointUri}`) + } catch (err) { + throw new 
Error(`Unable to save replicater password : ${err.message} (${endpointUri})`) + } + } + + return { + username: creds.username, + password: creds.password, + couchUri: creds.couchUri + } + } + + // Garbage collection of refresh tokens async gc() { const GC_PERCENT = process.env.GC_PERCENT const random = Math.random() diff --git a/src/components/db.js b/src/components/db.js index 4c19dfda..0cb814c0 100644 --- a/src/components/db.js +++ b/src/components/db.js @@ -28,7 +28,7 @@ class Db { return PROTOCOL + "://" + username + ":" + password + "@" + HOST + ":" + PORT; } - // Build external hostname that users will connect to + // Build external hostname that users will connect to the storage node buildHost() { let env = process.env; return env.DB_PROTOCOL_EXTERNAL + "://" + env.DB_HOST_EXTERNAL + ":" + env.DB_PORT_EXTERNAL; diff --git a/src/components/dbManager.js b/src/components/dbManager.js index b7fab40d..d1ecd814 100644 --- a/src/components/dbManager.js +++ b/src/components/dbManager.js @@ -1,7 +1,6 @@ import Utils from './utils.js'; import _ from 'lodash'; import Db from "./db.js" -import EncryptionUtils from "@verida/encryption-utils" import dotenv from 'dotenv'; dotenv.config(); @@ -11,13 +10,47 @@ class DbManager { this.error = null; } - async saveUserDatabase(did, contextName, databaseName, databaseHash, permissions) { + async saveUserDatabase(did, owner, contextName, databaseName, databaseHash, permissions) { const couch = Db.getCouch() - const userDatabaseName = process.env.DB_DB_INFO - const db = couch.db.use(userDatabaseName) + const didContextHash = Utils.generateDidContextHash(did, contextName) + const didContextDbName = `c${didContextHash}` - const text = `${did.toLowerCase()}/${contextName}/${databaseName}` - const id = EncryptionUtils.hash(text).substring(2) + // Create database for storing all the databases for this user context + let db + try { + await couch.db.create(didContextDbName); + db = couch.db.use(didContextDbName); + + const replicaterRole 
= `r${didContextHash}-replicater` + + let securityDoc = { + admins: { + names: [owner], + roles: [] + }, + members: { + names: [owner], + roles: [replicaterRole] + } + }; + + // Insert security document to ensure owner is the admin and any other read / write users can access the database + try { + await this._insertOrUpdate(db, securityDoc, '_security'); + } catch (err) { + return false; + } + } catch (err) { + // The didContext database may already exist, or may have been deleted so a file + // already exists. + // In that case, ignore the error and continue + if (err.error != "file_exists") { + throw err; + } + } + + db = couch.db.use(didContextDbName); + const id = Utils.generateDatabaseName(did, contextName, databaseName) try { const result = await this._insertOrUpdate(db, { @@ -26,7 +59,7 @@ class DbManager { contextName, databaseName, databaseHash, - permissions + permissions: permissions ? permissions : {} }, id) } catch (err) { throw err @@ -35,41 +68,36 @@ class DbManager { async getUserDatabases(did, contextName) { const couch = Db.getCouch() - const userDatabaseName = process.env.DB_DB_INFO - const db = couch.db.use(userDatabaseName) + const didContextHash = Utils.generateDidContextHash(did, contextName) + const didContextDbName = `c${didContextHash}` try { - const query = { - selector: { - did, - contextName - }, - limit: 1000 - } - - const results = await db.find(query) - const finalResult = results.docs.map((item) => { - delete item['_id'] - delete item['_rev'] + const db = couch.db.use(didContextDbName) + const result = await db.list({include_docs: true, limit: 1000}) + const finalResult = result.rows.map((item) => { + delete item.doc['_id'] + delete item.doc['_rev'] - return item + return item.doc }) return finalResult } catch (err) { - if (err.reason != "missing") { + if (err.reason != 'missing' && err.error != 'not_found') { throw err; } + + return [] } } async getUserDatabase(did, contextName, databaseName) { const couch = Db.getCouch() - const 
userDatabaseName = process.env.DB_DB_INFO - const db = couch.db.use(userDatabaseName) + const didContextHash = Utils.generateDidContextHash(did, contextName) + const didContextDbName = `c${didContextHash}` + const db = couch.db.use(didContextDbName) - const text = `${did.toLowerCase()}/${contextName}/${databaseName}` - const id = EncryptionUtils.hash(text).substring(2) + const id = Utils.generateDatabaseName(did, contextName, databaseName) try { const doc = await db.get(id) @@ -86,7 +114,7 @@ class DbManager { return result } catch (err) { - if (err.reason == "missing") { + if (err.reason == 'missing' || err.reason == 'deleted') { return false } @@ -94,13 +122,13 @@ class DbManager { } } - async deleteUserDatabase(did, contextName, databaseName, databaseHash) { + async deleteUserDatabase(did, contextName, databaseName) { const couch = Db.getCouch() - const userDatabaseName = process.env.DB_DB_INFO - const db = couch.db.use(userDatabaseName) + const didContextHash = Utils.generateDidContextHash(did, contextName) + const didContextDbName = `c${didContextHash}` + const db = couch.db.use(didContextDbName) - const text = `${did.toLowerCase()}/${contextName}/${databaseName}` - const id = EncryptionUtils.hash(text).substring(2) + const id = Utils.generateDatabaseName(did, contextName, databaseName) try { await this._insertOrUpdate(db, { @@ -112,12 +140,12 @@ class DbManager { } } - async createDatabase(username, databaseName, applicationName, options) { + async createDatabase(did, username, databaseHash, contextName, options) { let couch = Db.getCouch(); // Create database try { - await couch.db.create(databaseName); + await couch.db.create(databaseHash); } catch (err) { // The database may already exist, or may have been deleted so a file // already exists. 
@@ -127,10 +155,10 @@ class DbManager { } } - let db = couch.db.use(databaseName); + let db = couch.db.use(databaseHash); try { - await this.configurePermissions(db, username, applicationName, options.permissions); + await this.configurePermissions(did, db, username, contextName, options.permissions); } catch (err) { //console.log("configure error"); //console.log(err); @@ -139,12 +167,12 @@ class DbManager { return true; } - async updateDatabase(username, databaseName, applicationName, options) { + async updateDatabase(did, username, databaseHash, contextName, options) { const couch = Db.getCouch(); - const db = couch.db.use(databaseName); + const db = couch.db.use(databaseHash); // Do a sanity check to confirm the username is an admin of the database - const perms = await couch.request({db: databaseName, method: 'get', path: '/_security'}) + const perms = await couch.request({db: databaseHash, method: 'get', path: '/_security'}) const usernameIsAdmin = perms.admins.names.includes(username) if (!usernameIsAdmin) { @@ -152,7 +180,7 @@ class DbManager { } try { - await this.configurePermissions(db, username, applicationName, options.permissions); + await this.configurePermissions(did, db, username, contextName, options.permissions); } catch (err) { //console.log("update database error"); //console.log(err); @@ -161,11 +189,11 @@ class DbManager { return true; } - async deleteDatabase(databaseName, username) { + async deleteDatabase(databaseHash, username) { const couch = Db.getCouch(); // Do a sanity check to confirm the username is an admin of the database - const perms = await couch.request({db: databaseName, method: 'get', path: '/_security'}) + const perms = await couch.request({db: databaseHash, method: 'get', path: '/_security'}) const usernameIsAdmin = perms.admins.names.includes(username) if (!usernameIsAdmin) { @@ -174,7 +202,7 @@ class DbManager { // Create database try { - return await couch.db.destroy(databaseName); + return await 
couch.db.destroy(databaseHash); } catch (err) { // The database may already exist, or may have been deleted so a file // already exists. @@ -183,7 +211,7 @@ class DbManager { } } - async configurePermissions(db, username, applicationName, permissions) { + async configurePermissions(did, db, username, contextName, permissions) { permissions = permissions ? permissions : {}; let owner = username; @@ -193,12 +221,10 @@ class DbManager { let readUsers = [owner]; let deleteUsers = [owner]; - // @todo Support modifying user lists after db has been created - switch (permissions.write) { case "users": - writeUsers = _.union(writeUsers, Utils.didsToUsernames(permissions.writeList, applicationName)); - deleteUsers = _.union(deleteUsers, Utils.didsToUsernames(permissions.deleteList, applicationName)); + writeUsers = _.union(writeUsers, Utils.didsToUsernames(permissions.writeList, contextName)); + deleteUsers = _.union(deleteUsers, Utils.didsToUsernames(permissions.deleteList, contextName)); break; case "public": writeUsers = writeUsers.concat([process.env.DB_PUBLIC_USER]); @@ -207,7 +233,7 @@ class DbManager { switch (permissions.read) { case "users": - readUsers = _.union(readUsers, Utils.didsToUsernames(permissions.readList, applicationName)); + readUsers = _.union(readUsers, Utils.didsToUsernames(permissions.readList, contextName)); break; case "public": readUsers = readUsers.concat([process.env.DB_PUBLIC_USER]); @@ -215,6 +241,8 @@ class DbManager { } const dbMembers = _.union(readUsers, writeUsers); + const didContextHash = Utils.generateDidContextHash(did, contextName) + const replicaterRole = `r${didContextHash}-replicater` let securityDoc = { admins: { @@ -224,7 +252,7 @@ class DbManager { members: { // this grants read access to all members names: dbMembers, - roles: [] + roles: [replicaterRole, 'replicater-local'] } }; @@ -240,7 +268,7 @@ class DbManager { let deleteUsersJson = JSON.stringify(deleteUsers); try { - const writeFunction = "\n function(newDoc, oldDoc, 
userCtx, secObj) {\n if (" + writeUsersJson + ".indexOf(userCtx.name) == -1) throw({ unauthorized: 'User is not permitted to write to database' });\n}"; + const writeFunction = `\n function(newDoc, oldDoc, userCtx, secObj) {\n if (${writeUsersJson}.indexOf(userCtx.name) == -1 && userCtx.roles.indexOf('${replicaterRole}') == -1) throw({ unauthorized: 'User is not permitted to write to database' });\n}`; const writeDoc = { "validate_doc_update": writeFunction }; @@ -256,7 +284,7 @@ class DbManager { if (permissions.write === "public") { // If the public has write permissions, disable public from deleting records try { - const deleteFunction = "\n function(newDoc, oldDoc, userCtx, secObj) {\n if ("+deleteUsersJson+".indexOf(userCtx.name) == -1 && newDoc._deleted) throw({ unauthorized: 'User is not permitted to delete from database' });\n}"; + const deleteFunction = `\n function(newDoc, oldDoc, userCtx, secObj) {\n if (${deleteUsersJson}.indexOf(userCtx.name) == -1 && userCtx.roles.indexOf('${replicaterRole}') == -1 && newDoc._deleted) throw({ unauthorized: 'User is not permitted to delete from database' });\n}`; const deleteDoc = { "validate_doc_update": deleteFunction }; @@ -293,9 +321,9 @@ class DbManager { if (doc._rev) { newDoc._rev = doc._rev; newDoc._id = id; - return db.insert(newDoc); + return await db.insert(newDoc); } else { - return db.insert(newDoc, id); + return await db.insert(newDoc, id); } } diff --git a/src/components/userManager.js b/src/components/userManager.js index 1e5ce152..eb5b61f7 100644 --- a/src/components/userManager.js +++ b/src/components/userManager.js @@ -2,6 +2,7 @@ import crypto from 'crypto'; import Db from './db.js' import Utils from './utils.js' import DbManager from './dbManager.js'; +import AuthManager from './authManager'; import dotenv from 'dotenv'; dotenv.config(); @@ -94,23 +95,6 @@ class UserManager { throw err; } } - - try { - await couch.db.create(process.env.DB_DB_INFO) - const dbInfo = 
couch.db.use(process.env.DB_DB_INFO) - await dbInfo.createIndex({ - index: { - fields: ['did', 'contextName'] - }, - name: 'didContext' - }) - } catch (err) { - if (err.message == "The database could not be created, the file already exists.") { - console.log("Info database not created -- already existed"); - } else { - throw err; - } - } } async getUsage(did, contextName) { @@ -126,9 +110,19 @@ class UserManager { for (let d in databases) { const database = databases[d] - const dbInfo = await DbManager.getUserDatabase(did, contextName, database.databaseName) - result.databases++ - result.bytes += dbInfo.info.sizes.file + try { + const dbInfo = await DbManager.getUserDatabase(did, contextName, database.databaseName) + result.databases++ + result.bytes += dbInfo.info.sizes.file + } catch (err) { + if (err.error == 'not_found') { + // Database doesn't exist, so remove from the list of databases + await DbManager.deleteUserDatabase(did, contextName, database.databaseName) + continue + } + + throw err + } } const usage = result.bytes / parseInt(result.storageLimit) @@ -136,6 +130,119 @@ class UserManager { return result } + /** + * Confirm replication is correctly configured for a given DID and application context. + * + * If a storage node is being added or removed to the application context, it must be the + * last node to have checkReplication called. This ensures the node has a list of all the + * active databases and can ensure it is replicating correctly to the other nodes. + * + * The client SDK should call checkReplication() when opening a context to ensure the replication is working as expected. 
+ * + * @param {*} did + * @param {*} contextName + * @param {*} databaseName (optional) If not specified, checks all databases + */ + async checkReplication(did, contextName, databaseName) { + //console.log(`${Utils.serverUri()}: checkReplication(${did}, ${contextName}, ${databaseName})`) + // Lookup DID document and get list of endpoints for this context + const didDocument = await AuthManager.getDidDocument(did) + const didService = didDocument.locateServiceEndpoint(contextName, 'database') + let endpoints = [...didService.serviceEndpoint] // create a copy as this is cached and we will modify later + + // Confirm this endpoint is in the list of endpoints + const endpointIndex = endpoints.indexOf(Utils.serverUri()) + if (endpointIndex === -1) { + throw new Error('Server not a valid endpoint for this DID and context') + } + + // Remove this endpoint from the list of endpoints to check + endpoints.splice(endpointIndex, 1) + + let databases = [] + if (databaseName) { + //console.log(`${Utils.serverUri()}: Only checking ${databaseName}`) + // Only check a single database + databases.push(databaseName) + } else { + // Fetch all databases for this context + let userDatabases = await DbManager.getUserDatabases(did, contextName) + databases = userDatabases.map(item => item.databaseName) + //console.log(`${Utils.serverUri()}: Checking ${databases.length}) databases`) + } + + // Ensure there is a replication entry for each + const couch = Db.getCouch('internal') + const replicationDb = couch.db.use('_replicator') + + const localAuthBuffer = Buffer.from(`${process.env.DB_REPLICATION_USER}:${process.env.DB_REPLICATION_PASS}`); + const localAuthBase64 = localAuthBuffer.toString('base64') + + for (let d in databases) { + const dbName = databases[d] + + for (let e in endpoints) { + const endpointUri = endpoints[e] + const replicatorId = Utils.generateReplicatorHash(endpointUri, did, contextName) + const dbHash = Utils.generateDatabaseName(did, contextName, dbName) + let record 
+ try { + record = await replicationDb.get(`${replicatorId}-${dbHash}`) + //console.log(`${Utils.serverUri()}: Located replication record for ${dbHash} on ${endpointUri} (${replicatorId})`) + } catch (err) { + if (err.message == 'missing' || err.reason == 'deleted') { + //console.log(`${Utils.serverUri()}: Replication record for ${endpointUri} is missing... creating.`) + // No record, so create it + // Check if we have credentials + // No credentials? Ask for them from the endpoint + const { username, password, couchUri } = await AuthManager.fetchReplicaterCredentials(endpointUri, did, contextName) + //console.log(`${Utils.serverUri()}: Located replication credentials for ${endpointUri} (${username}, ${password}, ${couchUri})`) + + const remoteAuthBuffer = Buffer.from(`${username}:${password}`); + const remoteAuthBase64 = remoteAuthBuffer.toString('base64') + + const replicationRecord = { + _id: `${replicatorId}-${dbHash}`, + user_ctx: { + name: process.env.DB_REPLICATION_USER + }, + source: { + url: `http://localhost:${process.env.DB_PORT_INTERNAL}/${dbHash}`, + headers: { + Authorization: `Basic ${localAuthBase64}` + } + }, + target: { + url: `${couchUri}/${dbHash}`, + headers: { + Authorization: `Basic ${remoteAuthBase64}` + } + }, + create_target: false, + continuous: true, + owner: 'admin' + } + + try { + await DbManager._insertOrUpdate(replicationDb, replicationRecord, replicationRecord._id) + //console.log(`${Utils.serverUri()}: Saved replication entry for ${endpointUri} (${replicatorId})`) + } catch (err) { + //console.log(`${Utils.serverUri()}: Error saving replication entry for ${endpointUri} (${replicatorId}): ${err.message}`) + throw new Error(`Unable to create replication entry: ${err.message}`) + } + } + else { + //console.log(`${Utils.serverUri()}: Unknown error fetching replication entry for ${endpointUri} (${replicatorId}): ${err.message}`) + throw err + } + } + } + } + + // @todo: Find any replication errors and handle them nicely + // @todo: 
Remove any replication entries for deleted databases + } + } let userManager = new UserManager(); diff --git a/src/components/utils.js b/src/components/utils.js index bda772af..6f1dd2ff 100644 --- a/src/components/utils.js +++ b/src/components/utils.js @@ -2,6 +2,23 @@ import EncryptionUtils from "@verida/encryption-utils" class Utils { + generateHash(value) { + return EncryptionUtils.hash(value).substring(2); + } + + generateReplicaterUsername(endpointUri) { + return `r${this.generateHash(endpointUri)}` + } + + generateDidContextHash(did, contextName) { + let text = [ + did.toLowerCase(), + contextName + ].join("/"); + + return this.generateHash(text) + } + generateUsername(did, contextName) { did = did.toLowerCase() const text = [ @@ -28,6 +45,19 @@ class Utils { return "v" + hash } + generateReplicatorHash(endpointUri, did, contextName) { + let text = [ + endpointUri, + did.toLowerCase(), + contextName + ].join("/"); + + const hash = EncryptionUtils.hash(text).substring(2); + + // Database name must start with a letter + return "e" + hash + } + didsToUsernames(dids, contextName) { return dids ? 
dids.map(did => this.generateUsername(did.toLowerCase(), contextName)) : [] } @@ -45,6 +75,24 @@ class Utils { }); } + async error(res, message, httpStatus=400) { + return res.status(httpStatus).send({ + status: "fail", + message + }) + } + + async success(res, data) { + return res.status(200).send({ + status: "success", + data + }) + } + + serverUri() { + return process.env.ENDPOINT_URI + } + } let utils = new Utils(); diff --git a/src/controllers/auth.js b/src/controllers/auth.js index 7c93503b..4dbb49ff 100644 --- a/src/controllers/auth.js +++ b/src/controllers/auth.js @@ -2,6 +2,8 @@ import UserManager from '../components/userManager.js'; import Utils from '../components/utils.js'; import AuthManager from '../components/authManager.js'; import Db from '../components/db.js'; +import Axios from 'axios' +import EncryptionUtils from '@verida/encryption-utils'; class AuthController { @@ -200,6 +202,111 @@ class AuthController { } } + /** + * Ensure replication credentials exist on the server + * + * If the password is an empty string, will just determine if the user exists or not + * If the password is not an empty string, it will update the password to match + * If no user exists, must specify a password + * + * Return status is either: + * 1. `created` (user created) + * 2. `updated` (password updated) + * 3. 
`exists` (user existed, but password unchanged) + * + * It's essential to ensure the user has the replication role that grants them + * access to all databases associated with a context + * + * @param {*} req + * @param {*} res + * @returns + */ + async replicationCreds(req, res) { + const { + endpointUri, // endpoint making the request + did, + contextName, + timestampMinutes, + password, + signature + } = req.body + + // Verify params + if (!endpointUri) { + return Utils.error(res, 'Endpoint not specified') + } + + if (!timestampMinutes) { + return Utils.error(res, 'Timestamp not specified') + } + + // Verify timestampMinutes is within two minutes of now + const currentTimestampMinutes = Math.floor(Date.now() / 1000 / 60) + const diff = currentTimestampMinutes - timestampMinutes + if (diff > 2 || diff < -2) { + return Utils.error(res, `Timestamp is out of range ${diff}`) + } + + if (!did) { + return Utils.error(res, 'DID not specified') + } + + if (!contextName) { + return Utils.error(res, 'Context not specified') + } + + if (!signature) { + return Utils.error(res, 'Signature not specified') + } + + // Lookup DID document and confirm endpointUri is a valid endpoint + const didDocument = await AuthManager.getDidDocument(did) + const endpointService = didDocument.locateServiceEndpoint(contextName, 'database') + const endpoints = endpointService.serviceEndpoint + if (endpoints.indexOf(endpointUri) === -1) { + return Utils.error(res, `Invalid endpoint (${endpointUri}): DID not linked (${did})`) + } + + // Confirm this endpoint is linked to the DID and context + const thisEndpointUri = Utils.serverUri() + if (endpoints.indexOf(thisEndpointUri) === -1) { + return Utils.error(res, `Invalid DID and context: Not associated with this endpoint`) + } + + // Pull endpoint public key from /status and verify the signature + let endpointPublicKey + try { + const response = await Axios.get(`${endpointUri}/status`) + + endpointPublicKey = response.data.results.publicKey + const 
params = { + did, + contextName, + endpointUri, + timestampMinutes, + password + } + + if (!EncryptionUtils.verifySig(params, signature, endpointPublicKey)) { + return Utils.error(res, 'Invalid signature', 401) + } + } catch (err) { + return Utils.error(res, `Unknown error: ${err.message}`) + } + + const didContextHash = Utils.generateDidContextHash(did, contextName) + const replicaterRole = `r${didContextHash}-replicater` + + try { + const result = await AuthManager.ensureReplicationCredentials(endpointUri, password, replicaterRole) + return Utils.signedResponse({ + result + }, res) + } catch (err) { + return Utils.error(res, err.message) + } + } + } const authController = new AuthController(); diff --git a/src/controllers/system.js b/src/controllers/system.js index a994df2b..313bb44b 100644 --- a/src/controllers/system.js +++ b/src/controllers/system.js @@ -17,7 +17,8 @@ class SystemController { maxUsers: parseInt(process.env.MAX_USERS), currentUsers, version: packageJson.version, - publicKey: wallet.publicKey + publicKey: wallet.publicKey, + couchUri: db.buildHost() } return Utils.signedResponse({ diff --git a/src/controllers/user.js b/src/controllers/user.js index dd7e7597..abe4eca4 100644 --- a/src/controllers/user.js +++ b/src/controllers/user.js @@ -16,7 +16,6 @@ class UserController { } }, res); } catch (err) { - console.error(err); return res.status(500).send({ status: "fail", message: err.message @@ -46,21 +45,28 @@ class UserController { }); } - const userUsage = await UserManager.getUsage(did, contextName) - if (userUsage.usagePercent >= 100) { - return res.status(400).send({ + try { + const userUsage = await UserManager.getUsage(did, contextName) + if (userUsage.usagePercent >= 100) { + return res.status(400).send({ + status: "fail", + message: 'Storage limit reached' + }); + } + } catch (err) { + return res.status(500).send({ status: "fail", - message: 'Storage limit reached' - }); + message: err.message + }) } const databaseHash = 
Utils.generateDatabaseName(did, contextName, databaseName) let success; try { - success = await DbManager.createDatabase(username, databaseHash, contextName, options); + success = await DbManager.createDatabase(did, username, databaseHash, contextName, options); if (success) { - await DbManager.saveUserDatabase(did, contextName, databaseName, databaseHash, options.permissions) + await DbManager.saveUserDatabase(did, username, contextName, databaseName, databaseHash, options.permissions) return Utils.signedResponse({ status: "success" @@ -154,7 +160,6 @@ class UserController { } // Update permissions on a user's database - // @todo: database name should be in plain text, then hashed async updateDatabase(req, res) { const username = req.tokenData.username const did = req.tokenData.did @@ -173,9 +178,9 @@ class UserController { }); } - let success = await DbManager.updateDatabase(username, databaseHash, contextName, options); + let success = await DbManager.updateDatabase(did, username, databaseHash, contextName, options); if (success) { - await DbManager.saveUserDatabase(did, contextName, databaseName, databaseHash, options.permissions) + await DbManager.saveUserDatabase(did, username, contextName, databaseName, databaseHash, options.permissions) return Utils.signedResponse({ status: "success" @@ -284,6 +289,26 @@ class UserController { } } + async checkReplication(req, res) { + const did = req.tokenData.did + const contextName = req.tokenData.contextName + const databaseName = req.body.databaseName + + try { + const result = await UserManager.checkReplication(did, contextName, databaseName) + + return Utils.signedResponse({ + status: "success", + result + }, res); + } catch (err) { + return res.status(500).send({ + status: "fail", + message: err.message + }); + } + } + } const userController = new UserController(); diff --git a/src/routes/private.js b/src/routes/private.js index fdc3090e..cb196fda 100644 --- a/src/routes/private.js +++ b/src/routes/private.js @@ 
-10,5 +10,6 @@ router.post('/user/deleteDatabases', UserController.deleteDatabases); router.post('/user/databases', UserController.databases); router.post('/user/databaseInfo', UserController.databaseInfo); router.post('/user/usage', UserController.usage); +router.post('/user/checkReplication', UserController.checkReplication); export default router; \ No newline at end of file diff --git a/src/routes/public.js b/src/routes/public.js index c78e12f4..c32bd7c3 100644 --- a/src/routes/public.js +++ b/src/routes/public.js @@ -10,6 +10,7 @@ const router = express.Router(); router.get('/auth/public', UserController.getPublic); router.get('/status', SystemController.status); +router.post('/auth/replicationCreds', AuthController.replicationCreds); router.post('/auth/generateAuthJwt', AuthController.generateAuthJwt); router.post('/auth/authenticate', AuthController.authenticate); router.post('/auth/connect', AuthController.connect); diff --git a/test/replication.js b/test/replication.js new file mode 100644 index 00000000..2a9b5f00 --- /dev/null +++ b/test/replication.js @@ -0,0 +1,488 @@ +import Axios from 'axios' +import assert from 'assert'; +import { ethers } from 'ethers' +import { DIDDocument } from '@verida/did-document' +import { DIDClient } from '@verida/did-client'; +import { AutoAccount } from "@verida/account-node" +import { Keyring } from '@verida/keyring'; +import ComponentUtils from '../src/components/utils' +import CouchDb from 'nano' + +import dotenv from 'dotenv'; +dotenv.config(); + +import Utils from './utils' +import CONFIG from './config' + +// Enable verbose logging of what the tests are doing +const LOGGING_ENABLED = false + +// Use a pre-built mnemonic where the first private key is a Verida DID private key +const MNEMONIC = 'pave online install gift glimpse purpose truth loan arm wing west option' + +// Context name to use for the tests +const CONTEXT_NAME = 'Verida Test: Storage Node Replication' + +// Endpoints to use for testing +// WARNING!!! 
+// Only ever use local network endpoints. +// These tests will delete the `_replicator` database and `verida_replicater_creds` on +// ALL endpoints when they are complete. +const ENDPOINT_DSN = { + 'http://192.168.68.117:5000': 'http://admin:admin@192.168.68.117:5984', + 'http://192.168.68.118:5000': 'http://admin:admin@192.168.68.118:5984', +} +const ENDPOINTS = Object.keys(ENDPOINT_DSN) +const ENDPOINTS_DID = ENDPOINTS.map(item => `${item}/did/`) +const ENDPOINTS_COUCH = {} +ENDPOINTS.forEach(key => { + ENDPOINTS_COUCH[key] = key.replace('5000', '5984') +}) +const TEST_DATABASES = ['db1', 'db2', 'db3'] +const TEST_DEVICE_ID = 'Device 1' + +const didClient = new DIDClient(CONFIG.DID_CLIENT_CONFIG) + +function buildEndpointConnection(externalEndpoint, endpointCreds) { + return new CouchDb({ + url: externalEndpoint, + requestDefaults: { + headers: endpointCreds, + rejectUnauthorized: process.env.DB_REJECT_UNAUTHORIZED_SSL.toLowerCase() !== "false" + } + }) +} + +function log(output) { + if (LOGGING_ENABLED) { + console.log(output) + } +} + +/** + * WARNING: ONLY RUN THIS TEST ON LOCALHOST + * + * It deletes `_replicator` and `verida_replicater_creds` databases on all CouchDB + * endpoints upon completion of the tests.
+ * + * This is necessary to reset the couch instances to a known state (empty) + * + * Note: CouchDB replicator interval must be set to 2 seconds (in couch config) + * to ensure replication is activated during these tests + */ + +describe("Replication tests", function() { + let DID, DID_ADDRESS, DID_PUBLIC_KEY, DID_PRIVATE_KEY, keyring, wallet, account, AUTH_TOKENS, TEST_DATABASE_HASH + let REPLICATOR_CREDS = {} + + describe("Create test databases", async () => { + this.timeout(200 * 1000) + + this.beforeAll(async () => { + // Create a new VDA private key + if (MNEMONIC) { + wallet = ethers.Wallet.fromMnemonic(MNEMONIC) + } else { + wallet = ethers.Wallet.createRandom() + } + + DID_ADDRESS = wallet.address + DID = `did:vda:testnet:${DID_ADDRESS}` + DID_PUBLIC_KEY = wallet.publicKey + DID_PRIVATE_KEY = wallet.privateKey + keyring = new Keyring(wallet.mnemonic.phrase) + await didClient.authenticate(DID_PRIVATE_KEY, 'web3', CONFIG.DID_CLIENT_CONFIG.web3Config, ENDPOINTS_DID) + + TEST_DATABASE_HASH = TEST_DATABASES.map(item => ComponentUtils.generateDatabaseName(DID, CONTEXT_NAME, item)) + + log(DID_ADDRESS, DID, DID_PRIVATE_KEY, DID_PUBLIC_KEY, wallet.mnemonic.phrase) + + // Create a new VDA account using our test endpoints + account = new AutoAccount({ + defaultDatabaseServer: { + type: 'VeridaDatabase', + endpointUri: ENDPOINTS + }, + defaultMessageServer: { + type: 'VeridaMessage', + endpointUri: ENDPOINTS + }, + }, { + privateKey: wallet.privateKey, + didClientConfig: CONFIG.DID_CLIENT_CONFIG, + environment: CONFIG.ENVIRONMENT + }) + + // Create new DID document (using DIDClient) for the private key with two testing endpoints (local) + let doc = await didClient.get(DID) + if (!doc) { + doc = new DIDDocument(DID, DID_PUBLIC_KEY) + } + await doc.addContext(CONTEXT_NAME, keyring, DID_PRIVATE_KEY, { + database: { + type: 'VeridaDatabase', + endpointUri: ENDPOINTS + }, + messaging: { + type: 'VeridaMessage', + endpointUri: ENDPOINTS + }, + }) + + try { + const 
endpointResponses = await didClient.save(doc) + } catch (err) { + log(err) + log(didClient.getLastEndpointErrors()) + } + + // Fetch an auth token for each server + AUTH_TOKENS = {} + for (let i in ENDPOINTS) { + const endpoint = ENDPOINTS[i] + log(`Authenticating with ${endpoint}`) + const authJwtResult = await Axios.post(`${endpoint}/auth/generateAuthJwt`, { + did: DID, + contextName: CONTEXT_NAME + }); + + const authRequestId = authJwtResult.data.authJwt.authRequestId + const authJwt = authJwtResult.data.authJwt.authJwt + const consentMessage = `Authenticate this application context: "${CONTEXT_NAME}"?\n\n${DID.toLowerCase()}\n${authRequestId}` + const signature = await account.sign(consentMessage) + + const authenticateResponse = await Axios.post(`${endpoint}/auth/authenticate`, { + authJwt, + did: DID, + contextName: CONTEXT_NAME, + signature, + deviceId: TEST_DEVICE_ID + }) + AUTH_TOKENS[endpoint] = authenticateResponse.data.accessToken + } + }) + + // Create the test databases on the first endpoint + it.only('can create the test databases on the endpoints', async () => { + for (let i in ENDPOINTS) { + let endpoint = ENDPOINTS[i] + for (let i in TEST_DATABASES) { + const dbName = TEST_DATABASES[i] + const response = await Utils.createDatabase(dbName, DID, CONTEXT_NAME, AUTH_TOKENS[endpoint], endpoint) + assert.equal(response.data.status, 'success', 'database created') + } + } + }) + + // Call `checkReplication(db1)` on all the endpoints (first database only) + it.only('can initialise replication for one database via checkReplication()', async () => { + // @todo: fix code so endpoint doesn't create replication entries to itself + try { + for (let i in ENDPOINTS) { + const endpoint = ENDPOINTS[i] + log(`${endpoint}: Calling checkReplication() on for ${TEST_DATABASES[0]}`) + const result = await Utils.checkReplication(endpoint, AUTH_TOKENS[endpoint], TEST_DATABASES[0]) + assert.equal(result.data.status, 'success', 'Check replication completed successfully') + } 
+ + // Sleep 5 seconds to give replication time to initialise + log('Sleeping so replication has time to do its thing...') + await Utils.sleep(5000) + + for (let i in ENDPOINTS) { + const endpoint = ENDPOINTS[i] + const couch = new CouchDb({ + url: ENDPOINT_DSN[endpoint], + requestDefaults: { + rejectUnauthorized: process.env.DB_REJECT_UNAUTHORIZED_SSL.toLowerCase() !== "false" + } + }) + const conn = couch.db.use('_replicator') + + // Check replication entries have been created for all the other endpoints (but not this endpoint) + for (let e in ENDPOINTS) { + const endpointCheckUri = ENDPOINTS[e] + if (endpointCheckUri == endpoint) { + continue + } + + const replicatorId = ComponentUtils.generateReplicatorHash(endpointCheckUri, DID, CONTEXT_NAME) + const dbHash = ComponentUtils.generateDatabaseName(DID, CONTEXT_NAME, TEST_DATABASES[0]) + log(`${endpoint}: (${endpointCheckUri}) Locating _replication entry for ${TEST_DATABASES[0]} (${replicatorId}-${dbHash})`) + + let replicationEntry + try { + replicationEntry = await conn.get(`${replicatorId}-${dbHash}`) + } catch (err) { + log('pouchdb connection error') + log(err.message) + assert.fail('Replication record not created') + } + + // Check info is accurate + assert.ok(replicationEntry) + assert.ok(replicationEntry.source, `Have a source for ${endpointCheckUri}`) + assert.ok(replicationEntry.target, `Have a target for ${endpointCheckUri}`) + assert.equal(replicationEntry.source.url, `http://localhost:5984/${dbHash}`, `Source URI is correct for ${endpointCheckUri}`) + assert.equal(replicationEntry.target.url, `${ENDPOINTS_COUCH[endpointCheckUri]}/${dbHash}`, `Destination URI is correct for ${endpointCheckUri}`) + + REPLICATOR_CREDS[endpoint] = replicationEntry.target.headers + + const replicationResponse = await Axios.get(`${ENDPOINT_DSN[endpoint]}/_scheduler/docs/_replicator/${replicatorId}-${dbHash}`) + assert.ok(replicationResponse, 'Have a replication job') + + const status = replicationResponse.data + 
assert.ok(['pending', 'running'].indexOf(status.state) !== -1, 'Replication is active') + } + } + } catch (err) { + log(err) + assert.fail('error') + } + }) + + it.only('verify replication user can write to first database', async () => { + const endpoint0 = ENDPOINTS[0] + const endpoint1 = ENDPOINTS[1] + + const couch = buildEndpointConnection(ENDPOINT_DSN[endpoint0], REPLICATOR_CREDS[endpoint1]) + + log(`${endpoint0}: Creating three test records on ${TEST_DATABASES[0]} (${TEST_DATABASE_HASH[0]}) using credentials from ${endpoint1}`) + const endpoint0db1Connection = couch.db.use(TEST_DATABASE_HASH[0]) + const result1 = await endpoint0db1Connection.insert({_id: '1', sourceEndpoint: endpoint0}) + assert.ok(result1.ok, 'Record 1 saved') + const result2 = await endpoint0db1Connection.insert({_id: '2', sourceEndpoint: endpoint0}) + assert.ok(result2.ok, 'Record 2 saved') + const result3 = await endpoint0db1Connection.insert({_id: '3', sourceEndpoint: endpoint0}) + assert.ok(result3.ok, 'Record 3 saved') + }) + + // Verify data saved to db1 is being replicated for all endpoints + it.only('verify data is replicated on all endpoints for first database', async () => { + // Sleep 5ms to have replication time to do its thing + log('Sleeping so replication has time to do its thing...') + await Utils.sleep(5000) + + // Check the three records are correctly replicated on all the other databases + for (let i in ENDPOINTS) { + if (i == 0) { + // skip first endpoint + continue + } + + const externalEndpoint = ENDPOINTS[i] + + // Connect to the external endpoint, using the credentials from the + // first endpoint to confirm it has access (plus admin user doesnt have access) + const couch = buildEndpointConnection(ENDPOINTS_COUCH[externalEndpoint], REPLICATOR_CREDS[ENDPOINTS[0]]) + const conn = couch.db.use(TEST_DATABASE_HASH[0]) + + log(`${externalEndpoint}: Verifying endpoint has docs`) + const docs = await conn.list({include_docs: true}) + + // Note: There is a design document, 
which is why the number is actually 4 + assert.equal(docs.rows.length, 4, `Three rows returned from ${externalEndpoint}/${TEST_DATABASES[0]} (${TEST_DATABASE_HASH[0]})`) + } + }) + + it.only('can initialise replication for all database via checkReplication()', async () => { + for (let i in ENDPOINTS) { + const endpoint = ENDPOINTS[i] + log(`${endpoint}: Calling checkReplication() on all databases for ${endpoint}`) + const result = await Utils.checkReplication(endpoint, AUTH_TOKENS[endpoint]) + assert.equal(result.data.status, 'success', 'Check replication completed successfully') + } + }) + + it.only('verify data is being replicated for all databases and endpoints', async () => { + // Sleep 5ms to have replication time to initialise + log('Sleeping so replication has time to do its thing...') + await Utils.sleep(5000) + + let recordCount = 0 + // Create data on every database, on every endpoint, and verify on every other endpoint + for (let i in TEST_DATABASES) { + // skip first database as we've already used it + if (i == 0) { + continue + } + + const dbName = TEST_DATABASES[i] + const dbHash = TEST_DATABASE_HASH[i] + const createdDatabaseIds = [] + + log(`${dbName} (${dbHash}): Creating a record on every endpoint`) + for (let e in ENDPOINTS) { + const endpoint = ENDPOINTS[e] + + // Use the credentials of a different server as the local server doesn't have permissions + // to write (even as admin) + let creds + if (e == 0) { + creds = REPLICATOR_CREDS[ENDPOINTS[1]] + } else { + creds = REPLICATOR_CREDS[ENDPOINTS[0]] + } + + // create a record on this endpoint + const couch = buildEndpointConnection(ENDPOINTS_COUCH[endpoint], creds) + const conn = couch.db.use(dbHash) + const id = String(recordCount++) + createdDatabaseIds.push(id) + await conn.insert({_id: id, dbName, dbHash, endpoint}) + } + + log(`${dbName} (${dbHash}): Done (${createdDatabaseIds.length}). 
Sleeping for replication to do its thing...`) + await Utils.sleep(5000) + + for (let e in ENDPOINTS) { + const endpoint = ENDPOINTS[e] + + // Use the credentials of a different server as the local server doesn't have permissions + // to write (even as admin) + let creds + if (e == 0) { + creds = REPLICATOR_CREDS[ENDPOINTS[1]] + } else { + creds = REPLICATOR_CREDS[ENDPOINTS[0]] + } + + // create a record on this endpoint + const couch = buildEndpointConnection(ENDPOINTS_COUCH[endpoint], creds) + const conn = couch.db.use(dbHash) + + // confirm all the records exist + for (let j in createdDatabaseIds) { + const createdId = createdDatabaseIds[j] + const result = await conn.get(createdId) + assert.equal(result._id, createdId, 'Record deleted') + } + } + } + }) + + it.only('can delete a database', async () => { + // delete a database from all endpoints + for (let e in ENDPOINTS) { + const endpoint = ENDPOINTS[e] + const response = await Utils.deleteDatabase(TEST_DATABASES[0], DID, CONTEXT_NAME, AUTH_TOKENS[endpoint], endpoint) + assert.ok(response.data.status, 'success', `Database ${TEST_DATABASES[0]} deleted from ${endpoint}`) + } + }) + + it.only('verify database is completely deleted from all endpoints', async () => { + const dbHash = TEST_DATABASE_HASH[0] + + for (let e in ENDPOINTS) { + const endpoint = ENDPOINTS[e] + + // Use the credentials of a different server as the local server doesn't have permissions + // to write (even as admin) + let creds + if (e == 0) { + creds = REPLICATOR_CREDS[ENDPOINTS[1]] + } else { + creds = REPLICATOR_CREDS[ENDPOINTS[0]] + } + + const couch = buildEndpointConnection(ENDPOINTS_COUCH[endpoint], creds) + + // verify database is deleted from each endpoint + log(`${endpoint}: Verifying database is deleted (${TEST_DATABASES[0]}) from ${endpoint}`) + const dbConn = couch.db.use(dbHash) + try { + await dbConn.get('0') + assert.fail(`${dbHash} wasnt deleted from ${endpoint}`) + } catch (err) { + assert.equal(err.reason, 'Database does not 
exist.') + } + + // verify all replication entries for the database is removed from this endpoint + const couchAdmin = new CouchDb({ + url: ENDPOINT_DSN[endpoint], + requestDefaults: { + rejectUnauthorized: process.env.DB_REJECT_UNAUTHORIZED_SSL.toLowerCase() !== "false" + } + }) + + const replicationConn = couchAdmin.db.use('_replicator') + + log(`${endpoint}: Verifying all replication entries are deleted (${TEST_DATABASES[0]}) from ${endpoint}`) + for (let i in ENDPOINTS) { + const endpointCheckUri = ENDPOINTS[i] + if (endpointCheckUri == endpoint) { + continue + } + + const replicatorId = ComponentUtils.generateReplicatorHash(endpointCheckUri, DID, CONTEXT_NAME) + const dbHash = ComponentUtils.generateDatabaseName(DID, CONTEXT_NAME, TEST_DATABASES[0]) + log(`${endpoint}: Verifying replication entry for ${endpointCheckUri} is deleted from endpoint ${endpoint} (${replicatorId}-${dbHash})`) + + try { + await replicationConn.get(`${replicatorId}-${dbHash}`) + } catch (err) { + assert.equal(err.error, 'not_found', 'Replication entry not found') + } + } + + } + }) + + // can handle a storage node that goes down at any part in the process + }) + + // WARNING: This should never run on production! 
+ this.afterAll(async () => { + //return + log('Destroying _replicator, verida_replicater_creds and test databases on ALL endpoints') + + for (let endpoint in ENDPOINT_DSN) { + const conn = new CouchDb({ + url: ENDPOINT_DSN[endpoint], + requestDefaults: { + rejectUnauthorized: process.env.DB_REJECT_UNAUTHORIZED_SSL.toLowerCase() !== "false" + } + }) + + // Clear replication related databases to reset them for the next run + try { + await conn.db.destroy('_replicator') + } catch (err) {} + try { + await conn.db.destroy('verida_replicater_creds') + } catch (err) {} + await conn.db.create('_replicator') + await conn.db.create('verida_replicater_creds') + + // Delete test databases + for (let d in TEST_DATABASE_HASH) { + const databaseName = TEST_DATABASE_HASH[d] + try { + log(`Destroying ${databaseName}`) + await conn.db.destroy(databaseName) + } catch (err) {} + } + + // Delete created replication users + for (let i in ENDPOINTS) { + const endpointExternal = ENDPOINTS[i] + if (endpointExternal == endpoint) { + continue + } + + try { + const username = ComponentUtils.generateReplicaterUsername(endpointExternal) + const users = conn.db.use('_users') + log(`Deleting replication user ${username} for ${endpointExternal} from ${endpoint}`) + const doc = await users.get(`org.couchdb.user:${username}`) + await users.destroy(`org.couchdb.user:${username}`, doc._rev) + } catch (err) { + if (err.error != 'not_found') { + log(`Unable to delete user`) + log(err) + } + } + } + } + }) +}) \ No newline at end of file diff --git a/test/server.js b/test/server.js index 16d61e12..30127d89 100644 --- a/test/server.js +++ b/test/server.js @@ -279,7 +279,24 @@ describe("Server tests", function() { assert.equal(response.data.status, "success", "Successful delete response") - const response2 = await Axios.post(`${SERVER_URL}/user/deleteDatabase`, { + // confirm the database doesn't exist + try { + const response2 = await Axios.post(`${SERVER_URL}/user/databaseInfo`, { + databaseName, + did: 
accountInfo.did, + contextName: CONTEXT_NAME + }, { + headers: { + Authorization: `Bearer ${accessToken}` + } + }); + + assert.fail('Expected a 404 because the database shouldnt be found') + } catch (err) { + assert.equal(err.response.data.message, 'Database not found', 'Database not found') + } + + const response3 = await Axios.post(`${SERVER_URL}/user/deleteDatabase`, { databaseName: databaseName2, did: accountInfo.did, contextName: CONTEXT_NAME @@ -288,6 +305,7 @@ describe("Server tests", function() { Authorization: `Bearer ${accessToken}` } }); + assert.equal(response3.data.status, 'success', 'Successful delete response') assert.ok(TestUtils.verifySignature(response), 'Have a valid signature in response') }) diff --git a/test/utils.js b/test/utils.js index 5f45f116..8538239d 100644 --- a/test/utils.js +++ b/test/utils.js @@ -61,8 +61,20 @@ class Utils { }); } - async createDatabase(databaseName, did, contextName, accessToken) { - const response = await Axios.post(`${CONFIG.SERVER_URL}/user/createDatabase`, { + buildPouchDsn(dsn, dbName) { + return new PouchDb(`${dsn}/${dbName}`, { + requestDefaults: { + rejectUnauthorized: false + } + }); + } + + async createDatabase(databaseName, did, contextName, accessToken, serverUrl) { + if (!serverUrl) { + serverUrl = CONFIG.SERVER_URL + } + + const response = await Axios.post(`${serverUrl}/user/createDatabase`, { databaseName, did, contextName @@ -75,6 +87,18 @@ class Utils { return response } + async checkReplication(endpointUri, accessToken, databaseName) { + const response = await Axios.post(`${endpointUri}/user/checkReplication`, { + databaseName + }, { + headers: { + Authorization: `Bearer ${accessToken}` + } + }); + + return response + } + signString(str, privateKey) { if (privateKey == 'string') { privateKey = new Uint8Array(Buffer.from(privateKey.substr(2),'hex')) diff --git a/yarn.lock b/yarn.lock index a29153b2..c705ebc5 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2116,6 +2116,15 @@ axios@^0.27.2: 
follow-redirects "^1.14.9" form-data "^4.0.0" +axios@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.2.1.tgz#44cf04a3c9f0c2252ebd85975361c026cb9f864a" + integrity sha512-I88cFiGu9ryt/tfVEi4kX2SITsvDddTajXTOFmt2uK1ZVA8LytjtdeyefdQWEf5PU8w+4SSJDoYnggflB5tW4A== + dependencies: + follow-redirects "^1.15.0" + form-data "^4.0.0" + proxy-from-env "^1.1.0" + babel-plugin-polyfill-corejs2@^0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.2.tgz#e4c31d4c89b56f3cf85b92558954c66b54bd972d"