diff --git a/README.md b/README.md
index 0b91725..fff593e 100644
--- a/README.md
+++ b/README.md
@@ -14,6 +14,41 @@ Platform to run and keep the history of benchmark runs.
 - [Prometheus](https://prometheus.io) - Store metrics for benchmark results.
 - [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) - Send benchmark metrics to Prometheus.
 
+## Uploading Benchmarks to Supabench
+
+To upload a benchmark to Supabench, you need to create a zip archive of your benchmark folder.
+
+### How to Create a Zip Archive
+
+1. **Navigate to your benchmark folder** (e.g., `examples/realtime/broadcast-from-client`)
+
+2. **Create a zip file** containing all the files in the folder:
+   ```bash
+   # From inside the benchmark folder
+   zip -r benchmark.zip .
+   ```
+
+3. **Upload the zip file** through the Supabench UI when creating or updating a benchmark secret.
+
+### Example Structure
+
+Your zip file should contain a structure like this:
+```
+benchmark.zip
+├── main.tf
+├── variables.tf
+├── k6/
+│   ├── common.js
+│   ├── subs.js
+│   ├── Makefile
+│   └── summary.js
+└── modules/
+    └── script/
+        ├── main.tf
+        ├── variables.tf
+        └── entrypoint.sh.tpl
+```
+
 ## More Info
 
 More information about the project can be found on the [Github Wiki](https://github.com/supabase/supabench/wiki)
diff --git a/examples/realtime/broadcast-from-client/k6/Makefile b/examples/realtime/broadcast-from-client/k6/Makefile
new file mode 100644
index 0000000..ad87034
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/k6/Makefile
@@ -0,0 +1,19 @@
+.PHONY: db_test
+
+MAKEFLAGS += -j2
+
+export
+
+conns ?= 8
+shift ?= 0
+duration ?= 60
+messages_per_second ?= 60
+message_size_kb ?= 1
+rand = $(shell bash -c 'echo $$RANDOM')
+testrun ?= "random-run-$(rand)"
+
+db_test: subs
+
+subs:
+	@DURATION=$(duration) CONNS=$(conns) SHIFT=$(shift) MESSAGES_PER_SECOND=$(messages_per_second) MESSAGE_SIZE_KB=$(message_size_kb) PRESENCE_ENABLED=$${PRESENCE_ENABLED:-false} TEST_RUN=$(testrun) \
+	./k6 run subs.js --tag testrun=$(testrun) -o 'prometheus=namespace=k6'
diff --git a/examples/realtime/broadcast-from-client/k6/common.js b/examples/realtime/broadcast-from-client/k6/common.js
new file mode 100644
index 0000000..c2b58d6
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/k6/common.js
@@ -0,0 +1,47 @@
+/**
+ * Return a random integer between the minimum (inclusive)
+ * and maximum (exclusive) values
+ * @param {number} min - The minimum value to return.
+ * @param {number} max - The maximum value you want to return.
+ * @return {number} The random number between the min and max.
+ */
+export function getRandomInt(min, max) {
+  min = Math.ceil(min)
+  max = Math.floor(max)
+  // The maximum is exclusive and the minimum is inclusive
+  return Math.floor(Math.random() * (max - min) + min)
+}
+
+/**
+ * Generate the default k6 ramping-vus scenario.
+ * @param {number} baseDuration - Total duration of the scenario, in seconds.
+ * @param {number} conns - Maximum number of VUs during the scenario execution.
+ *
+ * The scenario starts at 0 VUs, ramps up to the full number of connections
+ * over 5/6 of the total duration, holds that level for the remaining 1/6,
+ * and then stops, allowing up to 20s of graceful ramp-down.
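+ *
+ * A sketch of the object returned by scenario(60, 10):
+ *   { executor: 'ramping-vus', startVUs: 0,
+ *     stages: [{ duration: '50s', target: 10 }, { duration: '10s', target: 10 }],
+ *     gracefulRampDown: '20s' }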
+ */
+export function scenario(baseDuration, conns) {
+  return {
+    executor: 'ramping-vus',
+    startVUs: 0,
+    stages: [
+      {
+        duration: `${(5 * parseInt(baseDuration)) / 6}s`,
+        target: parseInt(conns),
+      },
+      {
+        duration: `${parseInt(baseDuration) / 6}s`,
+        target: parseInt(conns),
+      },
+    ],
+    gracefulRampDown: '20s',
+  }
+}
+
+/* Exporting an array of default summaryTrendStats to be used in summary result. */
+export const trends = ['avg', 'med', 'p(99)', 'p(95)', 'p(0.1)', 'count']
diff --git a/examples/realtime/broadcast-from-client/k6/subs.js b/examples/realtime/broadcast-from-client/k6/subs.js
new file mode 100644
index 0000000..8285689
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/k6/subs.js
@@ -0,0 +1,262 @@
+import { check } from "k6";
+import http from "k6/http";
+import ws from "k6/ws";
+import { SharedArray } from "k6/data";
+import { Trend, Counter } from "k6/metrics";
+import { scenario } from "k6/execution";
+import { getRandomInt, scenario as sc, trends } from "./common.js";
+export { handleSummary } from "./summary.js";
+
+const users = new SharedArray("users", function () {
+  return JSON.parse(open("./users.json"));
+});
+
+const token = __ENV.MP_TOKEN;
+const authURI = __ENV.AUTH_URI
+  ? __ENV.AUTH_URI
+  : "https://proj.supabase.com/auth/v1";
+const socketURI = __ENV.MP_URI
+  ? __ENV.MP_URI
+  : "wss://proj.supabase.com/realtime/v1/websocket";
+
+const conns = __ENV.CONNS ? parseInt(__ENV.CONNS) : 10;
+const shift = __ENV.SHIFT ? parseInt(__ENV.SHIFT) : 0;
+const messagesPerSecond = __ENV.MESSAGES_PER_SECOND
+  ? parseInt(__ENV.MESSAGES_PER_SECOND)
+  : 60;
+const messageSizeKB = __ENV.MESSAGE_SIZE_KB
+  ? parseInt(__ENV.MESSAGE_SIZE_KB)
+  : 1;
+const baseDuration = __ENV.DURATION ? __ENV.DURATION : 60;
+const duration = parseInt(baseDuration) + 30;
+const presenceEnabled =
+  __ENV.PRESENCE_ENABLED === "true" || __ENV.PRESENCE_ENABLED === "1";
+const broadcastInterval = 1000;
+const latencyTrend = new Trend("latency_trend");
+const counterReceived = new Counter("received_updates");
+
+const to = {};
+
+export const options = {
+  vus: 1,
+  thresholds: to,
+  summaryTrendStats: trends,
+  scenarios: {
+    broadcast_authenticated: sc(baseDuration, conns),
+  },
+};
+
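+// Each VU signs a user in, fetches the channel names it may join via the REST
+// API, then opens a single websocket, joins every channel and pushes its access
+// token, and sends heartbeats every 25s. Once every join is acknowledged it
+// broadcasts batches of messages each second, measuring delivery latency from
+// the created_at timestamp embedded in every payload.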
+export default () => {
+  const user = users[(scenario.iterationInTest + shift) % users.length];
+  const authToken = getUserToken(user);
+  const headers = {
+    Authorization: `Bearer ${authToken}`,
+    apikey: token,
+  };
+  const channelsResponse = http.get(
+    `${authURI.replace("auth", "rest")}/channel_names?select=name`,
+    { headers }
+  );
+
+  const channels = channelsResponse.json().map((c) => c.name);
+  const URL = `${socketURI}?apikey=${token}`;
+  const joinedChannels = new Set();
+  let broadcastIntervalId = null;
+
+  const res = ws.connect(URL, {}, (socket) => {
+    socket.on("open", () => {
+      channels.map((room) =>
+        socket.send(createJoinMessage(room, authToken, presenceEnabled))
+      );
+      channels.map((room) =>
+        socket.send(createAccessTokenMessage(room, authToken))
+      );
+
+      socket.setInterval(
+        () => socket.send(createHeartbeatMessage()),
+        25 * 1000
+      );
+    });
+
+    socket.on("message", (msg) => {
+      const now = Date.now();
+      msg = JSON.parse(msg);
+
+      if (
+        msg.event === "phx_reply" &&
+        msg.payload &&
+        msg.payload.status === "ok"
+      ) {
+        const channelName = msg.topic.replace("realtime:", "");
+        joinedChannels.add(channelName);
+        console.log(
+          `Successfully joined channel: ${channelName} (${joinedChannels.size}/${channels.length})`
+        );
+
+        check(msg, {
+          "subscribed to realtime": (msg) => msg.payload.status === "ok",
+        });
+
+        if (joinedChannels.size === channels.length && !broadcastIntervalId) {
+          console.log("All channels joined, starting broadcast");
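+          // Pacing sketch: on every broadcastInterval (1s) tick, send roughly
+          // messagesPerSecond broadcasts, chaining them with setTimeout and a
+          // randomised sleep so the sends are spread across the one-second
+          // window instead of firing as a single burst.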
+          broadcastIntervalId = socket.setInterval(() => {
+            const messagesToSend = Math.ceil(messagesPerSecond);
+
+            const sendMessage = (index) => {
+              let rand = 0;
+              if (messagesToSend > 1) {
+                rand = getRandomInt(0, messagesToSend);
+              }
+
+              const start = Date.now();
+              const randomChannel = channels[getRandomInt(0, channels.length)];
+              socket.send(
+                createBroadcastMessage(randomChannel, createMessage())
+              );
+              const finish = Date.now();
+
+              const sleepTime =
+                ((messagesToSend - rand) / messagesToSend) *
+                  (broadcastInterval / 1000) -
+                (finish - start) / 1000;
+
+              if (index + 1 < messagesToSend) {
+                if (sleepTime > 0) {
+                  socket.setTimeout(
+                    () => sendMessage(index + 1),
+                    sleepTime * 1000
+                  );
+                } else {
+                  sendMessage(index + 1);
+                }
+              }
+            };
+
+            if (messagesToSend > 0) {
+              sendMessage(0);
+            }
+          }, broadcastInterval);
+        }
+      }
+
+      if (msg.event !== "broadcast") {
+        return;
+      }
+
+      const type = msg.payload.event;
+      let updated = 0;
+      if (msg.payload.payload) {
+        updated = msg.payload.payload.created_at;
+      }
+      console.log(`Message received: ${JSON.stringify(msg)}`);
+      latencyTrend.add(now - updated, { type: type });
+      counterReceived.add(1);
+
+      check(msg, {
+        "got realtime notification": (msg) => msg.event === "broadcast",
+      });
+    });
+
+    socket.on("error", (e) => {
+      if (e.error() != "websocket: close sent") {
+        console.error("An unexpected error occurred: ", e.error());
+      }
+    });
+
+    socket.setTimeout(function () {
+      socket.close();
+    }, duration * 1000);
+  });
+
+  check(res, { "status is 101": (r) => r && r.status === 101 });
+};
+
+function getUserToken(user) {
+  const loginRes = http.post(
+    `${authURI}/token?grant_type=password`,
+    JSON.stringify({
+      email: user.email,
+      password: user.password,
+    }),
+    {
+      headers: {
+        apikey: token,
+        "Content-Type": "application/json",
+      },
+    }
+  );
+
+  const authToken = loginRes.json("access_token");
+  check(authToken, {
+    "logged in successfully": () => loginRes.status === 200 && authToken,
+  });
+  return authToken.toString();
+}
+
+function createJoinMessage(room, authToken, presenceEnabled) {
+  const presenceConfig = presenceEnabled ? { key: "" } : { enabled: false };
+  return JSON.stringify({
+    topic: `realtime:${room}`,
+    event: "phx_join",
+    payload: {
+      config: {
+        broadcast: {
+          self: false,
+        },
+        presence: presenceConfig,
+        private: true,
+      },
+      access_token: authToken,
+    },
+    ref: "1",
+    join_ref: "1",
+  });
+}
+
+function createAccessTokenMessage(room, authToken) {
+  return JSON.stringify({
+    topic: `realtime:${room}`,
+    event: "access_token",
+    payload: {
+      access_token: authToken,
+    },
+    ref: "2",
+  });
+}
+
+function createHeartbeatMessage() {
+  return JSON.stringify({
+    topic: "phoenix",
+    event: "heartbeat",
+    payload: {},
+    ref: 0,
+  });
+}
+
+function createBroadcastMessage(channel, messagePayload) {
+  return JSON.stringify({
+    topic: `realtime:${channel}`,
+    event: "broadcast",
+    payload: {
+      event: "new message",
+      payload: messagePayload,
+    },
+    ref: 0,
+  });
+}
+
+function createMessage() {
+  const chars =
+    "!#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_abcdefghijklmnopqrstuvwxyz{|}~";
+  // honour the MESSAGE_SIZE_KB knob (roughly 1000 chars per KB)
+  const messageLength = messageSizeKB * 1000;
+  let payload = "";
+
+  for (let i = 0; i < messageLength; i++) {
+    payload += chars[Math.floor(Math.random() * chars.length)];
+  }
+
+  return {
+    created_at: Date.now(),
+    message: payload,
+  };
+}
diff --git a/examples/realtime/broadcast-from-client/k6/summary.js b/examples/realtime/broadcast-from-client/k6/summary.js
new file mode 100644
index 0000000..5f04bef
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/k6/summary.js
@@ -0,0 +1,79 @@
+import http from 'k6/http'
+import { textSummary } from 'https://jslib.k6.io/k6-summary/0.0.1/index.js'
+
+/* Setting up the environment variables for the test run. */
+const testrun = __ENV.TEST_RUN
+const origin = __ENV.TEST_ORIGIN
+const benchmark = __ENV.BENCHMARK_ID
+const run = __ENV.RUN_ID
+const token = __ENV.SUPABENCH_TOKEN
+const supabench_uri = __ENV.SUPABENCH_URI
+  ? __ENV.SUPABENCH_URI
+  : 'http://localhost:8090'
+
+/**
+ * Handle summary implementation that additionally sends the data to the reports server.
+ */
+export function handleSummary(data) {
+  console.log('Preparing the end-of-test summary...')
+  const started = Date.now()
+
+  // Send the results to remote server
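+  // If RUN_ID is not set, a new run record is created on the Supabench server;
+  // otherwise the existing run record identified by RUN_ID is patched with the results.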
+  if (!run) {
+    const report = {
+      output: textSummary(data, { indent: ' ', enableColors: false }),
+      raw: data,
+      benchmark_id: benchmark,
+      name: testrun ? testrun : null,
+      status: 'success',
+      origin: origin,
+      started_at: `${started - 60 * 1000}`,
+      ended_at: `${
+        started + parseInt(data.state.testRunDurationMs) + 60 * 1000
+      }`,
+    }
+
+    const resp = http.post(
+      `${supabench_uri}/api/collections/runs/records`,
+      JSON.stringify(report),
+      {
+        headers: {
+          'Content-Type': 'application/json',
+          Authorization: `Admin ${token}`,
+        },
+      }
+    )
+    if (resp.status != 200) {
+      console.error('Could not send summary, got status ' + resp.status)
+    }
+  } else {
+    const report = {
+      output: textSummary(data, { indent: ' ', enableColors: false }),
+      raw: data,
+      status: 'success',
+      started_at: `${started - 120 * 1000}`,
+      ended_at: `${
+        started + parseInt(data.state.testRunDurationMs) + 15 * 1000
+      }`,
+    }
+
+    const resp = http.patch(
+      `${supabench_uri}/api/collections/runs/records/${run}`,
+      JSON.stringify(report),
+      {
+        headers: {
+          'Content-Type': 'application/json',
+          Authorization: `Admin ${token}`,
+        },
+      }
+    )
+    if (resp.status != 200) {
+      console.error('Could not send summary, got status ' + resp.status)
+    }
+  }
+
+  return {
+    stdout: textSummary(data, { indent: ' ', enableColors: true }), // Show the text summary to stdout...
+    'summary.json': JSON.stringify(data), // and a JSON with all the details...
+  }
+}
diff --git a/examples/realtime/broadcast-from-client/main.tf b/examples/realtime/broadcast-from-client/main.tf
new file mode 100644
index 0000000..a00314f
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/main.tf
@@ -0,0 +1,43 @@
+terraform {
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "4.0.0"
+    }
+  }
+}
+
+provider "aws" {
+  region = "ap-southeast-1"
+}
+
+module "script" {
+  source = "./modules/script"
+
+  ami_id               = var.ami_id
+  instances_count      = var.instances_count
+  instance_type        = var.instance_type
+  security_group_id    = var.security_group_id
+  subnet_id            = var.subnet_id
+  sut_name             = var.sut_name
+  key_name             = var.key_name
+  private_key_location = var.private_key_location
+
+  testrun_name    = var.testrun_name
+  testrun_id      = var.testrun_id
+  test_origin     = var.test_origin
+  benchmark_id    = var.benchmark_id
+  supabench_token = var.supabench_token
+  supabench_uri   = var.supabench_uri
+
+  conns               = var.conns
+  duration            = var.duration
+  messages_per_second = var.messages_per_second
+  message_size_kb     = var.message_size_kb
+  pg_pass             = var.pg_pass
+  pg_host             = var.pg_host
+  mp_token            = var.mp_token
+  mp_uri              = var.mp_uri
+  auth_uri            = var.auth_uri
+  presence_enabled    = var.presence_enabled
+}
diff --git a/examples/realtime/broadcast-from-client/modules/script/entrypoint.sh.tpl b/examples/realtime/broadcast-from-client/modules/script/entrypoint.sh.tpl
new file mode 100644
index 0000000..abe1b01
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/modules/script/entrypoint.sh.tpl
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+wget https://golang.org/dl/go1.19.linux-amd64.tar.gz
+sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.19.linux-amd64.tar.gz
+export PATH=$PATH:/usr/local/go/bin
+
+export K6_VERSION='v0.37.0'
+
+~/go/bin/xk6 build --output /tmp/k6/k6 \
+  --with github.com/jdheyburn/xk6-prometheus@v0.1.6 \
+  --with github.com/grafana/xk6-sql@659485a
+
+telegraf --config telegraf.conf &>/dev/null &
+
+cd /tmp/k6 || exit 1
+
+export RUN_ID="${testrun_id}"
+export BENCHMARK_ID="${benchmark_id}"
+export TEST_RUN="${testrun_name}"
+export TEST_ORIGIN="${test_origin}"
+export SUPABENCH_TOKEN="${supabench_token}"
+export SUPABENCH_URI="${supabench_uri}"
+export PG_PASS="${pg_pass}"
+export PG_HOST="${pg_host}"
+export MP_TOKEN="${mp_token}"
+export MP_URI="${mp_uri}"
+export AUTH_URI="${auth_uri}"
+export INSTANCES="${instances}"
+export PRESENCE_ENABLED="${presence_enabled ? "true" : "false"}"
+
+make ${make_command} \
+  conns="${conns}" duration="${duration}" shift="${shift}" \
+  messages_per_second="${messages_per_second}" message_size_kb="${message_size_kb}" \
+  testrun="${testrun_name}"
diff --git a/examples/realtime/broadcast-from-client/modules/script/main.tf b/examples/realtime/broadcast-from-client/modules/script/main.tf
new file mode 100644
index 0000000..e0c94ca
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/modules/script/main.tf
@@ -0,0 +1,172 @@
+# creating ec2 instance that will be used to generate load
+# Most likely you will not need to change it
+
+resource "aws_vpc" "ap-southeast-1" {
+  enable_dns_support               = true
+  enable_dns_hostnames             = true
+  assign_generated_ipv6_cidr_block = true
+  cidr_block                       = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "ap-southeast-1" {
+  vpc_id                  = aws_vpc.ap-southeast-1.id
+  cidr_block              = cidrsubnet(aws_vpc.ap-southeast-1.cidr_block, 4, 1)
+  map_public_ip_on_launch = true
+
+  ipv6_cidr_block                 = cidrsubnet(aws_vpc.ap-southeast-1.ipv6_cidr_block, 8, 1)
+  assign_ipv6_address_on_creation = true
+}
+
+resource "aws_internet_gateway" "ap-southeast-1" {
+  vpc_id = aws_vpc.ap-southeast-1.id
+}
+
+resource "aws_default_route_table" "ap-southeast-1" {
+  default_route_table_id = aws_vpc.ap-southeast-1.default_route_table_id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.ap-southeast-1.id
+  }
+
+  route {
+    ipv6_cidr_block = "::/0"
+    gateway_id      = aws_internet_gateway.ap-southeast-1.id
+  }
+}
+
+resource "aws_route_table_association" "ap-southeast-1" {
+  subnet_id      = aws_subnet.ap-southeast-1.id
+  route_table_id = aws_default_route_table.ap-southeast-1.id
+}
+
+resource "aws_security_group" "ap-southeast-1" {
+  name   = "supabench-tf-security-group-${aws_vpc.ap-southeast-1.id}"
+  vpc_id = aws_vpc.ap-southeast-1.id
+
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port        = 0
+    to_port          = 0
+    protocol         = "-1"
+    ipv6_cidr_blocks = ["::/0"]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port        = 0
+    to_port          = 0
+    protocol         = "-1"
+    ipv6_cidr_blocks = ["::/0"]
+  }
+}
+
+resource "aws_instance" "k6" {
+  count = var.instances_count
+
+  ami                    = var.ami_id
+  instance_type          = var.instance_type
+  vpc_security_group_ids = ["${aws_security_group.ap-southeast-1.id}"]
+  subnet_id              = aws_subnet.ap-southeast-1.id
+  ipv6_address_count     = 1
+
+  key_name = var.key_name
+
+  tags = {
+    terraform   = "true"
+    environment = "qa"
+    app         = var.sut_name
+    creator     = "supabench"
+  }
+}
+
+resource "null_resource" "remote" {
+  count = var.instances_count
+
+  connection {
+    type        = "ssh"
+    user        = var.instance_user
+    host        = aws_instance.k6[count.index].public_ip
+    private_key = var.private_key_location
+    timeout     = "5m"
+  }
+
+  provisioner "file" {
+    source      = "${path.root}/k6"
+    destination = "/tmp"
+  }
+
+  provisioner "file" {
+    destination = "/tmp/k6/entrypoint.sh"
+
+    content = templatefile(
+      "${path.module}/entrypoint.sh.tpl",
+      {
+        # add your custom variables here
+        pg_pass             = var.pg_pass
+        pg_host             = var.pg_host
+        mp_token            = var.mp_token
+        mp_uri              = var.mp_uri
+        auth_uri            = var.auth_uri
+        conns               = var.conns
+        messages_per_second = var.messages_per_second
+        message_size_kb     = var.message_size_kb
+        shift               = 5000 * count.index
+        instances           = var.instances_count
+        duration            = var.duration
+        make_command        = count.index == 0 ? "db_test" : "subs"
+
+        # don't change these
+        testrun_id       = var.testrun_id
+        benchmark_id     = var.benchmark_id
+        testrun_name     = var.testrun_name
+        test_origin      = var.test_origin
+        supabench_token  = var.supabench_token
+        supabench_uri    = var.supabench_uri
+        presence_enabled = var.presence_enabled
+      }
+    )
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "#!/bin/bash",
+      "echo \"export RUN_ID='${var.testrun_id}'\" >> ~/.bashrc",
+      "echo \"export BENCHMARK_ID='${var.benchmark_id}'\" >> ~/.bashrc",
+      "echo \"export TEST_RUN='${var.testrun_name}'\" >> ~/.bashrc",
+      "echo \"export TEST_ORIGIN='${var.test_origin}'\" >> ~/.bashrc",
+      "echo \"export SUPABENCH_TOKEN='${var.supabench_token}'\" >> ~/.bashrc",
+      "echo \"export SUPABENCH_URI='${var.supabench_uri}'\" >> ~/.bashrc",
+      "echo \"export PG_PASS='${var.pg_pass}'\" >> ~/.bashrc",
+      "echo \"export PG_HOST='${var.pg_host}'\" >> ~/.bashrc",
+      "echo \"export MP_TOKEN='${var.mp_token}'\" >> ~/.bashrc",
+      "echo \"export MP_URI='${var.mp_uri}'\" >> ~/.bashrc",
+      "echo \"export AUTH_URI='${var.auth_uri}'\" >> ~/.bashrc",
+    ]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "#!/bin/bash",
+      "source ~/.bashrc",
+      "sudo chown -R ubuntu:ubuntu /tmp/k6",
+      "sudo chmod +x /tmp/k6/entrypoint.sh",
+      "/tmp/k6/entrypoint.sh",
+    ]
+  }
+
+  depends_on = [
+    aws_instance.k6,
+  ]
+}
diff --git a/examples/realtime/broadcast-from-client/modules/script/variables.tf b/examples/realtime/broadcast-from-client/modules/script/variables.tf
new file mode 100644
index 0000000..4f5034a
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/modules/script/variables.tf
@@ -0,0 +1,143 @@
+variable "ec2_name" {
+  description = "Name of ec2 loader instance"
+  type        = string
+  default     = "supaloader" # run ID
+}
+
+variable "instances_count" {
+  description = "Number of EC2 instances (should be even)"
+  type        = number
+  default     = 8
+}
+
+variable "instance_type" {
+  description = "Size of ec2 loader instance"
+  type        = string
+  default     = "t2.micro" # c5.4xlarge
+}
+
+variable "ami_id" {
+  description = "AMI to use for ec2 loader instance"
+  type        = string
+}
+
+variable "security_group_id" {
+  description = "Security group to use for ec2 loader instance"
+  type        = string
+}
+
+variable "subnet_id" {
+  description = "Subnet to use for ec2 loader instance"
+  type        = string
+}
+
+variable "instance_user" {
+  description = "The instance user for sshing"
+  type        = string
+  default     = "admin"
+}
+
+variable "key_name" {
+  description = "The instance key"
+  type        = string
+}
+
+variable "private_key_location" {
+  description = "Location of your private key to SSH into the instance"
+  type        = string
+}
+
+variable "sut_name" {
+  description = "Name of the system under test"
+  type        = string
+  default     = ""
+}
+
+variable "conns" {
+  description = "Number of connections to the system under test"
+  type        = string
+  default     = "4"
+}
+
+variable "messages_per_second" {
+  description = "Messages per second to send"
+  type        = string
+  default     = "60"
+}
+
+variable "message_size_kb" {
+  description = "Message size in KB"
+  type        = string
+  default     = "1"
+}
+
+variable "duration" {
+  description = "Duration of the test"
+  type        = string
+  default     = "60"
+}
+
+variable "pg_pass" {
+  description = "Postgres instance password"
+  type        = string
+}
+
+variable "pg_host" {
+  description = "Postgres instance host"
+  type        = string
+  default     = "db.proj.supabase.com"
+}
+
+variable "mp_token" {
+  description = "Multiplayer realtime api token"
+  type        = string
+}
+
+variable "mp_uri" {
+  description = "Multiplayer realtime api uri"
+  type        = string
+  default     = "wss://proj.supabase.com/realtime/v1/websocket"
+}
+
+variable "auth_uri" {
+  description = "auth api uri"
+  type        = string
+  default     = "https://proj.supabase.com/auth/v1"
+}
+
+variable "testrun_name" {
+  description = "Name of the testrun"
+  type        = string
+}
+
+variable "testrun_id" {
+  description = "ID of the testrun"
+  type        = string
+}
+
+variable "test_origin" {
+  description = "Origin of the test"
+  type        = string
+  default     = ""
+}
+
+variable "benchmark_id" {
+  description = "ID of the benchmark"
+  type        = string
+}
+
+variable "supabench_token" {
+  description = "Token to access the reports"
+  type        = string
+}
+
+variable "supabench_uri" {
+  description = "URI of the supabench server"
+  type        = string
+}
+
+variable "presence_enabled" {
+  description = "Enable or disable presence tracking in realtime channels"
+  type        = bool
+  default     = false
+}
diff --git a/examples/realtime/broadcast-from-client/modules/setup/main.tf b/examples/realtime/broadcast-from-client/modules/setup/main.tf
new file mode 100644
index 0000000..4cbb840
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/modules/setup/main.tf
@@ -0,0 +1,34 @@
+resource "null_resource" "fly" {
+  triggers = {
+    app_name         = var.app_name
+    fly_access_token = var.fly_access_token
+  }
+
+  provisioner "local-exec" {
+    command = "/flyctl scale -a ${var.app_name} count ${var.app_nodes_count}"
+    environment = {
+      HOME             = path.module
+      FLY_ACCESS_TOKEN = var.fly_access_token
+    }
+  }
+
+  provisioner "local-exec" {
+    when    = destroy
+    command = "/flyctl scale -a ${self.triggers.app_name} count 0"
+    environment = {
+      HOME             = path.module
+      FLY_ACCESS_TOKEN = self.triggers.fly_access_token
+    }
+  }
+}
+
+output "ready" {
+  # the value is not important because we're just
+  # using this for its dependencies.
+  value = {}
+
+  # Anything that refers to this output must wait until
+  # the actions for null_resource.fly
+  # have completed first.
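+  #
+  # A sketch of how a caller could sequence on this output (assuming the
+  # module is instantiated as module "setup"): reference it from another
+  # resource, e.g.
+  #   triggers = { setup_ready = jsonencode(module.setup.ready) }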
+  depends_on = [null_resource.fly]
+}
\ No newline at end of file
diff --git a/examples/realtime/broadcast-from-client/modules/setup/variables.tf b/examples/realtime/broadcast-from-client/modules/setup/variables.tf
new file mode 100644
index 0000000..06a626f
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/modules/setup/variables.tf
@@ -0,0 +1,15 @@
+variable "app_name" {
+  description = "Name of fly app"
+  type        = string
+}
+
+variable "fly_access_token" {
+  description = "Fly access token"
+  type        = string
+}
+
+variable "app_nodes_count" {
+  description = "Count of fly app nodes"
+  type        = string
+  default     = 6
+}
\ No newline at end of file
diff --git a/examples/realtime/broadcast-from-client/variables.tf b/examples/realtime/broadcast-from-client/variables.tf
new file mode 100644
index 0000000..a0729d1
--- /dev/null
+++ b/examples/realtime/broadcast-from-client/variables.tf
@@ -0,0 +1,161 @@
+variable "testrun_name" {
+  description = "Name of the testrun"
+  type        = string
+}
+
+variable "testrun_id" {
+  description = "ID of the testrun"
+  type        = string
+}
+
+variable "test_origin" {
+  description = "Origin of the test"
+  type        = string
+  default     = ""
+}
+
+variable "benchmark_id" {
+  description = "ID of the benchmark"
+  type        = string
+}
+
+variable "supabench_token" {
+  description = "Token to access the supabench"
+  type        = string
+  sensitive   = true
+}
+
+variable "supabench_uri" {
+  description = "URI of the supabench server"
+  type        = string
+}
+
+variable "instances_count" {
+  description = "Number of EC2 instances (should be even)"
+  type        = number
+  default     = 8
+}
+
+variable "ec2_name" {
+  description = "Name of ec2 loader instance"
+  type        = string
+  default     = "supaloader" # run ID
+}
+
+variable "instance_type" {
+  description = "Size of ec2 loader instance"
+  type        = string
+  default     = "t2.micro" # c5.4xlarge
+}
+
+variable "ami_id" {
+  description = "AMI to use for ec2 loader instance"
+  type        = string
+}
+
+variable "security_group_id" {
+  description = "Security group to use for ec2 loader instance"
+  type        = string
+}
+
+variable "subnet_id" {
+  description = "Subnet to use for ec2 loader instance"
+  type        = string
+}
+
+variable "instance_user" {
+  description = "The instance user for sshing"
+  type        = string
+  default     = "admin"
+}
+
+variable "key_name" {
+  description = "The instance key"
+  type        = string
+}
+
+variable "private_key_location" {
+  description = "Location of your private key to SSH into the instance"
+  type        = string
+}
+
+variable "sut_name" {
+  description = "Name of the system under test"
+  type        = string
+  default     = ""
+}
+
+variable "conns" {
+  description = "Number of connections to the system under test"
+  type        = string
+  default     = "4"
+}
+
+variable "messages_per_second" {
+  description = "Messages per second to send"
+  type        = string
+  default     = "60"
+}
+
+variable "message_size_kb" {
+  description = "Message size in KB"
+  type        = string
+  default     = "1"
+}
+
+variable "duration" {
+  description = "Duration of the test"
+  type        = string
+  default     = "60"
+}
+
+variable "pg_pass" {
+  description = "Postgres instance password"
+  type        = string
+}
+
+variable "pg_host" {
+  description = "Postgres instance host"
+  type        = string
+  default     = "db.proj.supabase.com"
+}
+
+variable "mp_token" {
+  description = "Multiplayer realtime api token"
+  type        = string
+}
+
+variable "mp_uri" {
+  description = "Multiplayer realtime api uri"
+  type        = string
+  default     = "wss://proj.supabase.com/realtime/v1/websocket"
+}
+
+variable "auth_uri" {
+  description = "auth api uri"
+  type        = string
+  default     = "https://proj.supabase.com/auth/v1"
+}
+
+variable "app_name" {
+  description = "Name of fly app"
+  type        = string
+  default     = "realtime-qa" # fly app name
+}
+
+variable "fly_access_token" {
+  description = "Fly access token"
+  type        = string
+}
+
+variable "app_nodes_count" {
+  description = "Count of fly app nodes"
+  type        = string
+  default     = 6
+}
+
+variable "presence_enabled" {
+  description = "Enable or disable presence tracking in realtime channels"
+  type        = bool
+  default     = false
+}