diff --git a/.github/workflows/docker-s3-deploy.yml b/.github/workflows/docker-s3-deploy.yml index dbca6b11..7eab1969 100644 --- a/.github/workflows/docker-s3-deploy.yml +++ b/.github/workflows/docker-s3-deploy.yml @@ -36,26 +36,47 @@ jobs: exit 1 fi - - name: Import GPG key - uses: crazy-max/ghaction-import-gpg@v6 - with: - gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} - passphrase: ${{ secrets.GPG_PASSPHRASE }} + - name: Install cosign + uses: sigstore/cosign-installer@v3 - - name: Sign tron-docker.zip with GPG + - name: Sign tron-docker.zip with Sigstore cosign (keyless) + # Keyless signing using GitHub OIDC — no private keys to manage or leak. + # The signature is bound to this workflow's identity (repo, ref, commit SHA). + # Verification: cosign verify-blob --certificate tron-docker.zip.cert \ + # --signature tron-docker.zip.sig \ + # --certificate-identity-regexp "https://github.com/tronprotocol/tron-docker" \ + # --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \ + # tron-docker.zip run: | - gpg --detach-sign --armor tron-docker.zip - # This creates tron-docker.zip.asc (ASCII-armored signature) + cosign sign-blob tron-docker.zip \ + --yes \ + --output-signature tron-docker.zip.sig \ + --output-certificate tron-docker.zip.cert - - name: Configure AWS Credentials + - name: Configure AWS Credentials (OIDC — no long-lived keys) uses: aws-actions/configure-aws-credentials@v4 with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ github.event.inputs.aws-region }} # Use input for region + # Uses GitHub OIDC provider to assume an IAM role with short-lived credentials. + # No static keys needed — credentials expire after the workflow run. + # Prerequisites: + # 1. Create an IAM OIDC identity provider for token.actions.githubusercontent.com + # 2. 
Create an IAM role with trust policy allowing this repo: + # "Condition": { + # "StringEquals": { + # "token.actions.githubusercontent.com:aud": "sts.amazonaws.com", + # "token.actions.githubusercontent.com:sub": "repo:tronprotocol/tron-docker:ref:refs/heads/main" + # } + # } + # 3. Attach S3 put-object policy scoped to the target bucket only + # 4. Delete the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY secrets from GitHub + role-to-assume: ${{ secrets.AWS_OIDC_ROLE_ARN }} + aws-region: ${{ github.event.inputs.aws-region }} - name: Upload tron-docker.zip to S3 + env: + BUCKET_NAME: ${{ github.event.inputs.bucket-name }} + VERSION: ${{ github.event.inputs.version }} run: | - zip -r publish.zip tron-docker.zip tron-docker.zip.asc - aws s3 cp publish.zip s3://${{ github.event.inputs.bucket-name }}/package/publish-latest.zip - aws s3 cp publish.zip s3://${{ github.event.inputs.bucket-name }}/package/publish-${{github.event.inputs.version}}.zip + zip -r publish.zip tron-docker.zip tron-docker.zip.sig tron-docker.zip.cert + aws s3 cp publish.zip "s3://${BUCKET_NAME}/package/publish-latest.zip" + aws s3 cp publish.zip "s3://${BUCKET_NAME}/package/publish-${VERSION}.zip" diff --git a/conf/private_net_config_witness1.conf b/conf/private_net_config_witness1.conf index 22953e16..1e07306f 100644 --- a/conf/private_net_config_witness1.conf +++ b/conf/private_net_config_witness1.conf @@ -272,7 +272,10 @@ genesis.block = { } localwitness = [ - # address TPL66VK2gCXNCD7EJg9pgJRfqcRazjhUZY + # IMPORTANT: This is a DEMO private key for local testing ONLY + # Address: TPL66VK2gCXNCD7EJg9pgJRfqcRazjhUZY + # WARNING: Replace with your own generated key for any real deployment + # NEVER use this key on mainnet with real funds da146374a75310b9666e834ee4ad0866d6f4035967bfc76217c5a495fff9f0d0 # you must enable this value and the witness address are match. 
] diff --git a/conf/private_net_config_witness2.conf b/conf/private_net_config_witness2.conf index b07d09d3..9f53d58f 100644 --- a/conf/private_net_config_witness2.conf +++ b/conf/private_net_config_witness2.conf @@ -274,7 +274,10 @@ genesis.block = { } localwitness = [ - # address TCjptjyjenNKB2Y6EwyVT43DQyUUorxKWi + # IMPORTANT: This is a DEMO private key for local testing ONLY + # Address: TCjptjyjenNKB2Y6EwyVT43DQyUUorxKWi + # WARNING: Replace with your own generated key for any real deployment + # NEVER use this key on mainnet with real funds 0ab0b4893c83102ed7be35eee6d50f081625ac75a07da6cb58b1ad2e9c18ce43 # you must enable this value and the witness address are match. ] diff --git a/conf/private_net_layout.toml b/conf/private_net_layout.toml index 11519b22..5a8706d5 100644 --- a/conf/private_net_layout.toml +++ b/conf/private_net_layout.toml @@ -1,8 +1,15 @@ +# SECURITY WARNING: This file contains sensitive configuration for remote node deployment. +# DO NOT commit actual credentials, SSH keys, or real hostnames to the repository. +# Use environment variables, .env files (added to .gitignore), or secure credential management systems. 
+# See: https://12factor.net/config +# +# Example configuration for private network layout: + # [[nodes]] -# node_ip = "192.168.1.1" # Remote node's IP +# node_ip = "127.0.0.1" # Remote node's IP # node_directory = "/path/to/direcotry" # Remote node's working direcotry for node # config_file = "/path/to/config" # Config file for remote node -# docker_compose_file =”/path/to/config“ # Config docker-compose file for remote node +# docker_compose_file ="/path/to/config" # Config docker-compose file for remote node # node_type = "fullnode/sr" # Fullnode or SR node # ssh_port = 22 # ssh_user = "user1" @@ -10,10 +17,10 @@ # # ssh_key = "/path/to/key1" # Optional; uncomment if using key auth # [[nodes]] -# node_ip = "192.168.1.2" # Changed IP to demonstrate different nodes +# node_ip = "127.0.0.1" # Changed IP to demonstrate different nodes # node_directory = "/path/to/directory" # config_file = "/path/to/config" -# docker_compose_file =”/path/to/config“ # Config docker-compose file for remote node +# docker_compose_file ="/path/to/config" # Config docker-compose file for remote node # node_type = "fullnode/sr" # ssh_port = 2222 # Custom SCP port for this node # ssh_user = "user2" @@ -21,11 +28,13 @@ [[nodes]] -node_ip = "ec2-3-25-116-244.ap-southeast-2.compute.amazonaws.com" -node_directory = "/home/ubuntu/mytest" -config_file = "/Users/ubuntu/conf/private_net_config_others.conf" -docker_compose_file = "/Users/ubuntu/docker-compose.yml" +node_ip = "127.0.0.1" # Replace with your actual node IP or hostname +node_directory = "/path/to/tron-node" # Replace with your actual node directory +config_file = "/path/to/private_net_config.conf" # Replace with your actual config path +docker_compose_file = "/path/to/docker-compose.yml" # Replace with your actual docker-compose path ssh_port = 22 -ssh_user = "ubuntu" -# ssh_password = "password1" -ssh_key = "/Users/ubuntu/Downloads/test-ci.pem" # Optional; uncomment if using key auth +ssh_user = "ubuntu" # Replace with your actual 
SSH user +# ssh_password = "password" # Optional; uncomment if using password auth (NOT RECOMMENDED) +# ssh_key = "/path/to/your/private/key" # Optional; uncomment if using key auth +# SECURITY WARNING: Never commit actual SSH keys or private credentials to the repository! +# Use environment variables or secure credential management systems instead. diff --git a/metric_monitor/REMOTE_WRITE_WITH_THANOS.md b/metric_monitor/REMOTE_WRITE_WITH_THANOS.md index e103a949..eb994a39 100644 --- a/metric_monitor/REMOTE_WRITE_WITH_THANOS.md +++ b/metric_monitor/REMOTE_WRITE_WITH_THANOS.md @@ -145,12 +145,18 @@ docker-compose -f ./docker-compose/docker-compose-alloy.yml up -d The [Thanos Receive](https://thanos.io/tip/components/receive.md/#receiver) service implements the Prometheus Remote Write API. It builds on top of the existing Prometheus TSDB and retains its usefulness while extending its functionality with long-term-storage, horizontal scalability, and downsampling. Prometheus instances are configured to continuously write metrics to it. Thanos Receive exposes the StoreAPI so that Thanos Queriers can query received metrics in real-time. -First, deploy [Minio](https://github.com/minio/minio) for long-term metric storage. Minio offers S3-compatible object storage functionality, allowing Thanos Receive to upload TSDB blocks to storage buckets at 2-hour intervals. While this guide uses Minio, you can opt for any storage service from the [Thanos Supported Clients](https://thanos.io/tip/thanos/storage.md/#supported-clients) list. For long-term monitoring, we recommend implementing a retention policy on your storage service to efficiently manage historical metric data. For instance, you might configure an S3 lifecycle policy when using AWS to automatically remove metrics older than one year. +First, deploy [Minio](https://github.com/minio/minio) for long-term metric storage. 
Minio offers S3-compatible object storage functionality, allowing Thanos Receive to upload TSDB blocks to storage buckets at 2-hour intervals. + +**⚠️ Important**: The MinIO configuration in this guide uses demo credentials (`minio`/`melovethanos`) for local testing only. For production deployments, use AWS S3 or other cloud storage services with proper IAM credentials, or generate strong unique credentials if using MinIO. + +While this guide uses Minio, you can opt for any storage service from the [Thanos Supported Clients](https://thanos.io/tip/thanos/storage.md/#supported-clients) list. For long-term monitoring, we recommend implementing a retention policy on your storage service to efficiently manage historical metric data. For instance, you might configure an S3 lifecycle policy when using AWS to automatically remove metrics older than one year. + ```sh -# Start Minio +# Start Minio (for local testing only) docker-compose -f ./docker-compose/minio.yml up -d # First set the MinIO alias with root credentials to enable bucket creation permissions +# Note: These are demo credentials - replace with your own in production docker exec minio mc alias set local http://localhost:9000 minio melovethanos # Then create the bucket diff --git a/metric_monitor/conf/bucket_storage.yml b/metric_monitor/conf/bucket_storage.yml index a9d4eded..e90dffaa 100644 --- a/metric_monitor/conf/bucket_storage.yml +++ b/metric_monitor/conf/bucket_storage.yml @@ -3,5 +3,8 @@ config: bucket: "test-thanos-001" endpoint: "minio:9000" # for example: s3.ap-southeast-1.amazonaws.com for AWS S3 on region ap-southeast-1 insecure: true # True for local test using http instead of https + # ⚠️ DEMO CREDENTIALS FOR LOCAL TESTING ONLY ⚠️ + # These match the MinIO demo credentials and should NEVER be used in production + # For production: Use AWS S3 with proper IAM roles or access keys access_key: "minio" secret_key: "melovethanos" diff --git a/metric_monitor/conf/main_net_config_open_metric.conf 
b/metric_monitor/conf/main_net_config_open_metric.conf new file mode 100644 index 00000000..365bc556 --- /dev/null +++ b/metric_monitor/conf/main_net_config_open_metric.conf @@ -0,0 +1,766 @@ +storage { + # Directory for storing persistent data + db.engine = "LEVELDB", + db.sync = false, + db.directory = "database", + index.directory = "index", + transHistory.switch = "on", + # You can custom these 14 databases' configs: + + # account, account-index, asset-issue, block, block-index, + # block_KDB, peers, properties, recent-block, trans, + # utxo, votes, witness, witness_schedule. + + # Otherwise, db configs will remain default and data will be stored in + # the path of "output-directory" or which is set by "-d" ("--output-directory"). + + # setting can impove leveldb performance .... start + # node: if this will increase process fds,you may be check your ulimit if 'too many open files' error occurs + # see https://github.com/tronprotocol/tips/blob/master/tip-343.md for detail + # if you find block sync has lower performance,you can try this settings + #default = { + # maxOpenFiles = 100 + #} + #defaultM = { + # maxOpenFiles = 500 + #} + #defaultL = { + # maxOpenFiles = 1000 + #} + # setting can impove leveldb performance .... end + + # Attention: name is a required field that must be set !!! 
+ properties = [ + // { + // name = "account", + // path = "storage_directory_test", + // createIfMissing = true, + // paranoidChecks = true, + // verifyChecksums = true, + // compressionType = 1, // compressed with snappy + // blockSize = 4096, // 4 KB = 4 * 1024 B + // writeBufferSize = 10485760, // 10 MB = 10 * 1024 * 1024 B + // cacheSize = 10485760, // 10 MB = 10 * 1024 * 1024 B + // maxOpenFiles = 100 + // }, + // { + // name = "account-index", + // path = "storage_directory_test", + // createIfMissing = true, + // paranoidChecks = true, + // verifyChecksums = true, + // compressionType = 1, // compressed with snappy + // blockSize = 4096, // 4 KB = 4 * 1024 B + // writeBufferSize = 10485760, // 10 MB = 10 * 1024 * 1024 B + // cacheSize = 10485760, // 10 MB = 10 * 1024 * 1024 B + // maxOpenFiles = 100 + // }, + ] + + needToUpdateAsset = true + + //dbsettings is needed when using rocksdb as the storage implement (db.engine="ROCKSDB"). + //we'd strongly recommend that do not modify it unless you know every item's meaning clearly. + dbSettings = { + levelNumber = 7 + //compactThreads = 32 + blocksize = 64 // n * KB + maxBytesForLevelBase = 256 // n * MB + maxBytesForLevelMultiplier = 10 + level0FileNumCompactionTrigger = 4 + targetFileSizeBase = 256 // n * MB + targetFileSizeMultiplier = 1 + } + + //backup settings when using rocks db as the storage implement (db.engine="ROCKSDB"). + //if you want to use the backup plugin, please confirm set the db.engine="ROCKSDB" above. + backup = { + enable = false // indicate whether enable the backup plugin + propPath = "prop.properties" // record which bak directory is valid + bak1path = "bak1/database" // you must set two backup directories to prevent application halt unexpected(e.g. kill -9). + bak2path = "bak2/database" + frequency = 10000 // indicate backup db once every 10000 blocks processed. 
+ } + + balance.history.lookup = false + + # checkpoint.version = 2 + # checkpoint.sync = true + + # the estimated number of block transactions (default 1000, min 100, max 10000). + # so the total number of cached transactions is 65536 * txCache.estimatedTransactions + # txCache.estimatedTransactions = 1000 + # if true, transaction cache initialization will be faster. default false + # txCache.initOptimization = true + + # data root setting, for check data, currently, only reward-vi is used. + + # merkleRoot = { + # reward-vi = 9debcb9924055500aaae98cdee10501c5c39d4daa75800a996f4bdda73dbccd8 // main-net, Sha256Hash, hexString + # } + +} + +node.discovery = { + enable = true + persist = true +} + +# custom stop condition +#node.shutdown = { +# BlockTime = "54 59 08 * * ?" # if block header time in persistent db matched. +# BlockHeight = 33350800 # if block header height in persistent db matched. +# BlockCount = 12 # block sync count after node start. +#} + +node.backup { + # udp listen port, each member should have the same configuration + port = 10001 + + # my priority, each member should use different priority + priority = 8 + + # time interval to send keepAlive message, each member should have the same configuration + keepAliveInterval = 3000 + + # peer's ip list, can't contain mine + members = [ + # "ip", + # "ip" + ] +} + +crypto { + engine = "eckey" +} +# prometheus metrics start +node.metrics = { + prometheus{ + enable=true + port="9527" + } +} + +# prometheus metrics end + +node { + # trust node for solidity node + # trustNode = "ip:port" + trustNode = "127.0.0.1:50051" + + # expose extension api to public or not + walletExtensionApi = true + + listen.port = 18888 + + connection.timeout = 2 + + fetchBlock.timeout = 200 + + tcpNettyWorkThreadNum = 0 + + udpNettyWorkThreadNum = 1 + + # Number of validate sign thread, default availableProcessors + # validateSignThreadNum = 16 + + maxConnections = 30 + + minConnections = 8 + + minActiveConnections = 3 + + 
maxConnectionsWithSameIp = 2 + + maxHttpConnectNumber = 50 + + minParticipationRate = 15 + + isOpenFullTcpDisconnect = false + inactiveThreshold = 600 //seconds + + p2p { + version = 11111 # mainnet:11111; nile testnet:201910292 + } + + active = [ + # Active establish connection in any case + # Sample entries: + # "ip:port", + # "ip:port" + ] + + passive = [ + # Passive accept connection in any case + # Sample entries: + # "ip:port", + # "ip:port" + ] + + fastForward = [ + "100.27.171.62:18888", + "15.188.6.125:18888" + ] + + http { + fullNodeEnable = true + fullNodePort = 8090 + solidityEnable = true + solidityPort = 8091 + PBFTEnable = true + PBFTPort = 8092 + } + + rpc { + enable = true + port = 50051 + solidityEnable = true + solidityPort = 50061 + PBFTEnable = true + PBFTPort = 50071 + + # Number of gRPC thread, default availableProcessors / 2 + # thread = 16 + + # The maximum number of concurrent calls permitted for each incoming connection + # maxConcurrentCallsPerConnection = + + # The HTTP/2 flow control window, default 1MB + # flowControlWindow = + + # Connection being idle for longer than which will be gracefully terminated + maxConnectionIdleInMillis = 60000 + + # Connection lasting longer than which will be gracefully terminated + # maxConnectionAgeInMillis = + + # The maximum message size allowed to be received on the server, default 4MB + # maxMessageSize = + + # The maximum size of header list allowed to be received, default 8192 + # maxHeaderListSize = + + # Transactions can only be broadcast if the number of effective connections is reached. + minEffectiveConnection = 1 + + # The switch of the reflection service, effective for all gRPC services + # reflectionService = true + } + + # number of solidity thread in the FullNode. + # If accessing solidity rpc and http interface timeout, could increase the number of threads, + # The default value is the number of cpu cores of the machine. 
+ #solidity.threads = 8 + + # Limits the maximum percentage (default 75%) of producing block interval + # to provide sufficient time to perform other operations e.g. broadcast block + # blockProducedTimeOut = 75 + + # Limits the maximum number (default 700) of transaction from network layer + # netMaxTrxPerSecond = 700 + + # Whether to enable the node detection function, default false + # nodeDetectEnable = false + + # use your ipv6 address for node discovery and tcp connection, default false + # enableIpv6 = false + + # if your node's highest block num is below than all your pees', try to acquire new connection. default false + # effectiveCheckEnable = false + + # Dynamic loading configuration function, disabled by default + # dynamicConfig = { + # enable = false + # Configuration file change check interval, default is 600 seconds + # checkInterval = 600 + # } + + dns { + # dns urls to get nodes, url format tree://{pubkey}@{domain}, default empty + treeUrls = [ + #"tree://AKMQMNAJJBL73LXWPXDI4I5ZWWIZ4AWO34DWQ636QOBBXNFXH3LQS@main.trondisco.net", + ] + + # enable or disable dns publish, default false + # publish = false + + # dns domain to publish nodes, required if publish is true + # dnsDomain = "nodes1.example.org" + + # dns private key used to publish, required if publish is true, hex string of length 64 + # dnsPrivate = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" + + # known dns urls to publish if publish is true, url format tree://{pubkey}@{domain}, default empty + # knownUrls = [ + #"tree://APFGGTFOBVE2ZNAB3CSMNNX6RRK3ODIRLP2AA5U4YFAA6MSYZUYTQ@nodes2.example.org", + # ] + + # staticNodes = [ + # static nodes to published on dns + # Sample entries: + # "ip:port", + # "ip:port" + # ] + + # merge several nodes into a leaf of tree, should be 1~5 + # maxMergeSize = 5 + + # only nodes change percent is bigger then the threshold, we update data on dns + # changeThreshold = 0.1 + + # dns server to publish, required if publish is true, only aws 
or aliyun is support + # serverType = "aws" + + # access key id of aws or aliyun api, required if publish is true, string + # accessKeyId = "your-key-id" + + # access key secret of aws or aliyun api, required if publish is true, string + # accessKeySecret = "your-key-secret" + + # if publish is true and serverType is aliyun, it's endpoint of aws dns server, string + # aliyunDnsEndpoint = "alidns.aliyuncs.com" + + # if publish is true and serverType is aws, it's region of aws api, such as "eu-south-1", string + # awsRegion = "us-east-1" + + # if publish is true and server-type is aws, it's host zone id of aws's domain, string + # awsHostZoneId = "your-host-zone-id" + } + + # open the history query APIs(http&GRPC) when node is a lite fullNode, + # like {getBlockByNum, getBlockByID, getTransactionByID...}. + # default: false. + # note: above APIs may return null even if blocks and transactions actually are on the blockchain + # when opening on a lite fullnode. only open it if the consequences being clearly known + # openHistoryQueryWhenLiteFN = false + + jsonrpc { + # Note: If you turn on jsonrpc and run it for a while and then turn it off, you will not + # be able to get the data from eth_getLogs for that period of time. + + # httpFullNodeEnable = true + # httpFullNodePort = 8545 + # httpSolidityEnable = true + # httpSolidityPort = 8555 + # httpPBFTEnable = true + # httpPBFTPort = 8565 + + # The maximum blocks range to retrieve logs for eth_getLogs, default value is 5000, + # should be > 0, otherwise means no limit. + maxBlockRange = 5000 + + # The maximum number of allowed topics within a topic criteria, default value is 1000, + # should be > 0, otherwise means no limit. + maxSubTopics = 1000 + } + + # Disabled api list, it will work for http, rpc and pbft, both fullnode and soliditynode, + # but not jsonrpc. 
+ # Sample: The setting is case insensitive, GetNowBlock2 is equal to getnowblock2 + # + # disabledApi = [ + # "getaccount", + # "getnowblock2" + # ] + +} + +## rate limiter config +rate.limiter = { + # Every api could be set a specific rate limit strategy. Three strategy are supported:GlobalPreemptibleAdapter、IPQPSRateLimiterAdapte、QpsRateLimiterAdapter + # GlobalPreemptibleAdapter: permit is the number of preemptible resource, every client must apply one resourse + # before do the request and release the resource after got the reponse automaticlly. permit should be a Integer. + # QpsRateLimiterAdapter: qps is the average request count in one second supported by the server, it could be a Double or a Integer. + # IPQPSRateLimiterAdapter: similar to the QpsRateLimiterAdapter, qps could be a Double or a Integer. + # If do not set, the "default strategy" is set.The "default startegy" is based on QpsRateLimiterAdapter, the qps is set as 10000. + # + # Sample entries: + # + http = [ + # { + # component = "GetNowBlockServlet", + # strategy = "GlobalPreemptibleAdapter", + # paramString = "permit=1" + # }, + + # { + # component = "GetAccountServlet", + # strategy = "IPQPSRateLimiterAdapter", + # paramString = "qps=1" + # }, + + # { + # component = "ListWitnessesServlet", + # strategy = "QpsRateLimiterAdapter", + # paramString = "qps=1" + # } + ], + + rpc = [ + # { + # component = "protocol.Wallet/GetBlockByLatestNum2", + # strategy = "GlobalPreemptibleAdapter", + # paramString = "permit=1" + # }, + + # { + # component = "protocol.Wallet/GetAccount", + # strategy = "IPQPSRateLimiterAdapter", + # paramString = "qps=1" + # }, + + # { + # component = "protocol.Wallet/ListWitnesses", + # strategy = "QpsRateLimiterAdapter", + # paramString = "qps=1" + # }, + ] + + # global qps, default 50000 + # global.qps = 50000 + # IP-based global qps, default 10000 + # global.ip.qps = 10000 +} + + + +seed.node = { + # List of the seed nodes + # Seed nodes are stable full nodes + # example: + 
# ip.list = [ + # "ip:port", + # "ip:port" + # ] + ip.list = [ + "3.225.171.164:18888", + "52.53.189.99:18888", + "18.196.99.16:18888", + "34.253.187.192:18888", + "18.133.82.227:18888", + "35.180.51.163:18888", + "54.252.224.209:18888", + "18.231.27.82:18888", + "52.15.93.92:18888", + "34.220.77.106:18888", + "15.207.144.3:18888", + "13.124.62.58:18888", + "54.151.226.240:18888", + "35.174.93.198:18888", + "18.210.241.149:18888", + "54.177.115.127:18888", + "54.254.131.82:18888", + "18.167.171.167:18888", + "54.167.11.177:18888", + "35.74.7.196:18888", + "52.196.244.176:18888", + "54.248.129.19:18888", + "43.198.142.160:18888", + "3.0.214.7:18888", + "54.153.59.116:18888", + "54.153.94.160:18888", + "54.82.161.39:18888", + "54.179.207.68:18888", + "18.142.82.44:18888", + "18.163.230.203:18888", + # "[2a05:d014:1f2f:2600:1b15:921:d60b:4c60]:18888", // use this if support ipv6 + # "[2600:1f18:7260:f400:8947:ebf3:78a0:282b]:18888", // use this if support ipv6 + ] +} + +genesis.block = { + # Reserve balance + assets = [ + { + accountName = "Zion" + accountType = "AssetIssue" + address = "TLLM21wteSPs4hKjbxgmH1L6poyMjeTbHm" + balance = "99000000000000000" + }, + { + accountName = "Sun" + accountType = "AssetIssue" + address = "TXmVpin5vq5gdZsciyyjdZgKRUju4st1wM" + balance = "0" + }, + { + accountName = "Blackhole" + accountType = "AssetIssue" + address = "TLsV52sRDL79HXGGm9yzwKibb6BeruhUzy" + balance = "-9223372036854775808" + } + ] + + witnesses = [ + { + address: THKJYuUmMKKARNf7s2VT51g5uPY6KEqnat, + url = "http://GR1.com", + voteCount = 100000026 + }, + { + address: TVDmPWGYxgi5DNeW8hXrzrhY8Y6zgxPNg4, + url = "http://GR2.com", + voteCount = 100000025 + }, + { + address: TWKZN1JJPFydd5rMgMCV5aZTSiwmoksSZv, + url = "http://GR3.com", + voteCount = 100000024 + }, + { + address: TDarXEG2rAD57oa7JTK785Yb2Et32UzY32, + url = "http://GR4.com", + voteCount = 100000023 + }, + { + address: TAmFfS4Tmm8yKeoqZN8x51ASwdQBdnVizt, + url = "http://GR5.com", + voteCount = 100000022 + 
}, + { + address: TK6V5Pw2UWQWpySnZyCDZaAvu1y48oRgXN, + url = "http://GR6.com", + voteCount = 100000021 + }, + { + address: TGqFJPFiEqdZx52ZR4QcKHz4Zr3QXA24VL, + url = "http://GR7.com", + voteCount = 100000020 + }, + { + address: TC1ZCj9Ne3j5v3TLx5ZCDLD55MU9g3XqQW, + url = "http://GR8.com", + voteCount = 100000019 + }, + { + address: TWm3id3mrQ42guf7c4oVpYExyTYnEGy3JL, + url = "http://GR9.com", + voteCount = 100000018 + }, + { + address: TCvwc3FV3ssq2rD82rMmjhT4PVXYTsFcKV, + url = "http://GR10.com", + voteCount = 100000017 + }, + { + address: TFuC2Qge4GxA2U9abKxk1pw3YZvGM5XRir, + url = "http://GR11.com", + voteCount = 100000016 + }, + { + address: TNGoca1VHC6Y5Jd2B1VFpFEhizVk92Rz85, + url = "http://GR12.com", + voteCount = 100000015 + }, + { + address: TLCjmH6SqGK8twZ9XrBDWpBbfyvEXihhNS, + url = "http://GR13.com", + voteCount = 100000014 + }, + { + address: TEEzguTtCihbRPfjf1CvW8Euxz1kKuvtR9, + url = "http://GR14.com", + voteCount = 100000013 + }, + { + address: TZHvwiw9cehbMxrtTbmAexm9oPo4eFFvLS, + url = "http://GR15.com", + voteCount = 100000012 + }, + { + address: TGK6iAKgBmHeQyp5hn3imB71EDnFPkXiPR, + url = "http://GR16.com", + voteCount = 100000011 + }, + { + address: TLaqfGrxZ3dykAFps7M2B4gETTX1yixPgN, + url = "http://GR17.com", + voteCount = 100000010 + }, + { + address: TX3ZceVew6yLC5hWTXnjrUFtiFfUDGKGty, + url = "http://GR18.com", + voteCount = 100000009 + }, + { + address: TYednHaV9zXpnPchSywVpnseQxY9Pxw4do, + url = "http://GR19.com", + voteCount = 100000008 + }, + { + address: TCf5cqLffPccEY7hcsabiFnMfdipfyryvr, + url = "http://GR20.com", + voteCount = 100000007 + }, + { + address: TAa14iLEKPAetX49mzaxZmH6saRxcX7dT5, + url = "http://GR21.com", + voteCount = 100000006 + }, + { + address: TBYsHxDmFaRmfCF3jZNmgeJE8sDnTNKHbz, + url = "http://GR22.com", + voteCount = 100000005 + }, + { + address: TEVAq8dmSQyTYK7uP1ZnZpa6MBVR83GsV6, + url = "http://GR23.com", + voteCount = 100000004 + }, + { + address: TRKJzrZxN34YyB8aBqqPDt7g4fv6sieemz, + url = 
"http://GR24.com", + voteCount = 100000003 + }, + { + address: TRMP6SKeFUt5NtMLzJv8kdpYuHRnEGjGfe, + url = "http://GR25.com", + voteCount = 100000002 + }, + { + address: TDbNE1VajxjpgM5p7FyGNDASt3UVoFbiD3, + url = "http://GR26.com", + voteCount = 100000001 + }, + { + address: TLTDZBcPoJ8tZ6TTEeEqEvwYFk2wgotSfD, + url = "http://GR27.com", + voteCount = 100000000 + } + ] + + timestamp = "0" #2017-8-26 12:00:00 + + parentHash = "0xe58f33f9baf9305dc6f82b9f1934ea8f0ade2defb951258d50167028c780351f" +} + +// Optional.The default is empty. +// It is used when the witness account has set the witnessPermission. +// When it is not empty, the localWitnessAccountAddress represents the address of the witness account, +// and the localwitness is configured with the private key of the witnessPermissionAddress in the witness account. +// When it is empty,the localwitness is configured with the private key of the witness account. + +//localWitnessAccountAddress = + +localwitness = [ +] + +#localwitnesskeystore = [ +# "localwitnesskeystore.json" +#] + +block = { + needSyncCheck = true + maintenanceTimeInterval = 21600000 + proposalExpireTime = 259200000 // 3 day: 259200000(ms) +} + +# Transaction reference block, default is "solid", configure to "head" may cause TaPos error +# trx.reference.block = "solid" // head;solid; + +# This property sets the number of milliseconds after the creation of the transaction that is expired, default value is 60000. +# trx.expiration.timeInMilliseconds = 60000 + +vm = { + supportConstant = false + maxEnergyLimitForConstant = 100000000 + minTimeRatio = 0.0 + maxTimeRatio = 5.0 + saveInternalTx = false + + # Indicates whether the node stores featured internal transactions, such as freeze, vote and so on + # saveFeaturedInternalTx = false + + # Indicates whether the node stores the details of the internal transactions generated by the + # CANCELALLUNFREEZEV2 opcode, such as bandwidth/energy/tronpower cancel amount. 
+ # saveCancelAllUnfreezeV2Details = false + + # In rare cases, transactions that will be within the specified maximum execution time (default 10(ms)) are re-executed and packaged + # longRunningTime = 10 + + # Indicates whether the node support estimate energy API. + # estimateEnergy = false + + # Indicates the max retry time for executing transaction in estimating energy. + # estimateEnergyMaxRetry = 3 +} + +committee = { + allowCreationOfContracts = 0 //mainnet:0 (reset by committee),test:1 + allowAdaptiveEnergy = 0 //mainnet:0 (reset by committee),test:1 +} + +event.subscribe = { + native = { + useNativeQueue = true // if true, use native message queue, else use event plugin. + bindport = 5555 // bind port + sendqueuelength = 1000 //max length of send queue + } + version = 0 + # Specify the starting block number to sync historical events. This is only applicable when version = 1. + # After performing a full event sync, set this value to 0 or a negative number. + # startSyncBlockNum = 1 + + path = "" // absolute path of plugin + server = "" // target server address to receive event triggers + // dbname|username|password, if you want to create indexes for collections when the collections + // are not exist, you can add version and set it to 2, as dbname|username|password|version + // if you use version 2 and one collection not exists, it will create index automaticaly; + // if you use version 2 and one collection exists, it will not create index, you must create index manually; + dbconfig = "" + contractParse = true + topics = [ + { + triggerName = "block" // block trigger, the value can't be modified + enable = false + topic = "block" // plugin topic, the value could be modified + solidified = false // if set true, just need solidified block, default is false + }, + { + triggerName = "transaction" + enable = false + topic = "transaction" + solidified = false + ethCompatible = false // if set true, add transactionIndex, cumulativeEnergyUsed, 
preCumulativeLogCount, logList, energyUnitPrice, default is false + }, + { + triggerName = "contractevent" + enable = false + topic = "contractevent" + }, + { + triggerName = "contractlog" + enable = false + topic = "contractlog" + redundancy = false // if set true, contractevent will also be regarded as contractlog + }, + { + triggerName = "solidity" // solidity block trigger(just include solidity block number and timestamp), the value can't be modified + enable = true // the default value is true + topic = "solidity" + }, + { + triggerName = "solidityevent" + enable = false + topic = "solidityevent" + }, + { + triggerName = "soliditylog" + enable = false + topic = "soliditylog" + redundancy = false // if set true, solidityevent will also be regarded as soliditylog + } + ] + + filter = { + fromblock = "" // the value could be "", "earliest" or a specified block number as the beginning of the queried range + toblock = "" // the value could be "", "latest" or a specified block number as end of the queried range + contractAddress = [ + "" // contract address you want to subscribe, if it's set to "", you will receive contract logs/events with any contract address. + ] + + contractTopic = [ + "" // contract topic you want to subscribe, if it's set to "", you will receive contract logs/events with any contract topic. 
+ ] + } +} diff --git a/metric_monitor/docker-compose/docker-compose-alloy.yml b/metric_monitor/docker-compose/docker-compose-alloy.yml index 3b70a85d..cd0acfee 100644 --- a/metric_monitor/docker-compose/docker-compose-alloy.yml +++ b/metric_monitor/docker-compose/docker-compose-alloy.yml @@ -2,7 +2,7 @@ version: '3.8' services: alloy: - image: grafana/alloy:latest + image: grafana/alloy:v1.14.0 container_name: grafana-alloy volumes: - '/:/host:ro,rslave' diff --git a/metric_monitor/docker-compose/docker-compose-quick-start.yml b/metric_monitor/docker-compose/docker-compose-quick-start.yml index 575eee2a..bd541744 100644 --- a/metric_monitor/docker-compose/docker-compose-quick-start.yml +++ b/metric_monitor/docker-compose/docker-compose-quick-start.yml @@ -6,7 +6,7 @@ services: service: tron-node prometheus: - image: prom/prometheus:latest + image: prom/prometheus:v3.10.0 container_name: prometheus deploy: resources: diff --git a/metric_monitor/docker-compose/grafana.yml b/metric_monitor/docker-compose/grafana.yml index 49feafd8..581e60ab 100644 --- a/metric_monitor/docker-compose/grafana.yml +++ b/metric_monitor/docker-compose/grafana.yml @@ -1,13 +1,16 @@ version: '3.8' services: grafana: - image: grafana/grafana-oss + image: grafana/grafana-oss:12.4.1 container_name: grafana - user: root + security_opt: + - no-new-privileges:true + cap_drop: + - ALL deploy: resources: limits: - memory: 1g + memory: 2g environment: - GF_METRICS_ENABLED=true # Enable Grafana metrics exposure - GF_SERVER_HTTP_PORT=3000 # Set Grafana's internal port to 3000 diff --git a/metric_monitor/docker-compose/minio.yml b/metric_monitor/docker-compose/minio.yml index a90c82b6..528732c2 100644 --- a/metric_monitor/docker-compose/minio.yml +++ b/metric_monitor/docker-compose/minio.yml @@ -1,13 +1,17 @@ version: '3.8' services: minio: - image: minio/minio:latest + image: minio/minio:RELEASE.2025-09-07T16-13-09Z container_name: minio ports: - "9000:9000" # Map port for API - "9001:9001" # Map port 
for UI command: minio server /data --console-address ":9001" environment: + # ⚠️ DEMO CREDENTIALS FOR LOCAL TESTING ONLY ⚠️ + # These are publicly known credentials for demonstration purposes + # For production: Use AWS S3 or other cloud storage with proper IAM credentials + # Never use these credentials in production environments MINIO_ROOT_USER: minio MINIO_ROOT_PASSWORD: melovethanos volumes: diff --git a/metric_monitor/docker-compose/node-exporter.yml b/metric_monitor/docker-compose/node-exporter.yml index da8bac20..c858c8e4 100644 --- a/metric_monitor/docker-compose/node-exporter.yml +++ b/metric_monitor/docker-compose/node-exporter.yml @@ -1,7 +1,7 @@ version: '3.8' services: node-exporter: - image: quay.io/prometheus/node-exporter:latest + image: quay.io/prometheus/node-exporter:v1.10.2 container_name: node-exporter network_mode: host pid: host diff --git a/metric_monitor/docker-compose/prometheus.yml b/metric_monitor/docker-compose/prometheus.yml index dd192047..7ae56e81 100644 --- a/metric_monitor/docker-compose/prometheus.yml +++ b/metric_monitor/docker-compose/prometheus.yml @@ -1,9 +1,12 @@ version: '3.8' services: prometheus: - image: prom/prometheus:latest + image: prom/prometheus:v3.10.0 container_name: prometheus - user: root + security_opt: + - no-new-privileges:true + cap_drop: + - ALL ports: - "9090:9090" # used for local query metrics volumes: diff --git a/metric_monitor/docker-compose/thanos-querier.yml b/metric_monitor/docker-compose/thanos-querier.yml index 4221da15..54881909 100644 --- a/metric_monitor/docker-compose/thanos-querier.yml +++ b/metric_monitor/docker-compose/thanos-querier.yml @@ -3,7 +3,6 @@ services: thanos-querier: image: quay.io/thanos/thanos:v0.33.0 container_name: thanos-querier - user: root ports: - "9091:9091" command: diff --git a/metric_monitor/docker-compose/thanos-receive.yml b/metric_monitor/docker-compose/thanos-receive.yml index 6f394c55..b5d26a0b 100644 --- a/metric_monitor/docker-compose/thanos-receive.yml +++ 
b/metric_monitor/docker-compose/thanos-receive.yml
@@ -2,7 +2,6 @@ version: '3.8'
 services:
   thanos-receive:
     image: quay.io/thanos/thanos:v0.33.0
-    user: root
     container_name: thanos-receive
     volumes:
       - ../receive-data-0:/receive/data
diff --git a/metric_monitor/docker-compose/thanos-store.yml b/metric_monitor/docker-compose/thanos-store.yml
index 69084528..fccfba9e 100644
--- a/metric_monitor/docker-compose/thanos-store.yml
+++ b/metric_monitor/docker-compose/thanos-store.yml
@@ -3,7 +3,6 @@ services:
   thanos-store:
     image: quay.io/thanos/thanos:v0.33.0
     container_name: thanos-store
-    user: root
     ports:
       - "10911:10911" # HTTP
       - "10912:10912" # gRPC
diff --git a/metric_monitor/docker-compose/tron-fullnode.yml b/metric_monitor/docker-compose/tron-fullnode.yml
index 127425e6..78af387e 100644
--- a/metric_monitor/docker-compose/tron-fullnode.yml
+++ b/metric_monitor/docker-compose/tron-fullnode.yml
@@ -1,9 +1,8 @@
 version: '3.8'
 services:
   tron-node:
-    image: tronprotocol/java-tron:latest
+    image: tronprotocol/java-tron:latest # always use the latest
     container_name: tron-node1
-    user: root
     deploy:
       resources:
         limits:
@@ -15,10 +14,10 @@ services:
       - "18888:18888/udp" # for p2p node discovery
       - "50051:50051"
     volumes:
-      - ../../conf:/java-tron/conf # ensure the config path is correct, make sure the config file metric is enabled
+      - ../conf:/java-tron/conf # ensure the config path is correct, make sure the config file metric is enabled
       - ../output-directory:/java-tron/output-directory # mount a local directory to make the block data persistent.
- ../logs:/java-tron/logs command: > - -jvm "{-Xmx14g -Xmn1g -XX:MaxDirectMemorySize=1G -XX:+UseConcMarkSweepGC -XX:+PrintGC -Xloggc:./logs/gc.log }" -c ./conf/main_net_config.conf + -jvm "{-Xmx14g -Xmn1g -XX:MaxDirectMemorySize=1G -XX:+UseConcMarkSweepGC -XX:+PrintGC -Xloggc:./logs/gc.log }" -c ./conf/main_net_config_open_metric.conf environment: - TZ=Asia/Shanghai # The metric and log date will be changed accordingly diff --git a/private_net/README.md b/private_net/README.md index 597fd18f..11541602 100644 --- a/private_net/README.md +++ b/private_net/README.md @@ -4,6 +4,17 @@ Here is a quick-start guide for setting up a TRON private network using Docker. A private chain needs at least one fullnode run by a [Super Representative (SR)](https://tronprotocol.github.io/documentation-en/mechanism-algorithm/sr/) to produce blocks, and any number of fullnodes to synchronize blocks and broadcast transactions. +## ⚠️ Security Notice + +The configuration files in this directory contain **DEMO private keys for local testing purposes ONLY**. 
+ +**IMPORTANT:** +- These keys are publicly known and should NEVER be used in production +- Always generate new private keys for any real deployment +- NEVER use these keys on mainnet or with real funds +- For production use, consider using encrypted keystore files instead of plaintext private keys +- Refer to the [localwitnesskeystore documentation](https://tronprotocol.github.io/documentation-en/using_javatron/installing_javatron/?h=keystore#specifying-super-representative-account-private-key-using-keystore-password) for secure key management + ## Prerequisites ### Minimum hardware requirements diff --git a/run-checkstyle.sh b/run-checkstyle.sh index 3d574ff6..db2d1b1a 100755 --- a/run-checkstyle.sh +++ b/run-checkstyle.sh @@ -1,64 +1,67 @@ -#!/bin/sh +#!/bin/bash +set -eu # exit on any error -# Define the URL for the Checkstyle JAR file +# ─── Configuration ─── CHECKSTYLE_VERSION="8.42" CHECKSTYLE_JAR="checkstyle-${CHECKSTYLE_VERSION}-all.jar" CHECKSTYLE_URL="https://github.com/checkstyle/checkstyle/releases/download/checkstyle-${CHECKSTYLE_VERSION}/${CHECKSTYLE_JAR}" -# Define the directory where the JAR file should be stored -LIB_DIR="libs" +# SHA-256 digest of the official release JAR +# Obtain from: https://github.com/checkstyle/checkstyle/releases/tag/checkstyle-8.42 +# or run: sha256sum checkstyle-8.42-all.jar +CHECKSTYLE_SHA256="4982ebeaa429fe41f3be2c3309a5c49d84c71ee1f78f967344b8bc82cf3101aa" -# Create the directory if it does not exist +LIB_DIR="libs" mkdir -p "$LIB_DIR" - -# Define the full path to the JAR file CHECKSTYLE_PATH="${LIB_DIR}/${CHECKSTYLE_JAR}" -# Check if the JAR file already exists +# ─── Download with integrity verification ─── +verify_checksum() { + local file="$1" + local expected="$2" + local actual + actual=$(sha256sum "$file" | awk '{print $1}') + if [ "$actual" != "$expected" ]; then + echo "ERROR: Checksum mismatch for ${file}" >&2 + echo " Expected: ${expected}" >&2 + echo " Actual: ${actual}" >&2 + rm -f "$file" + exit 1 + fi 
+ echo "Checksum verified: ${file}" +} + if [ -f "$CHECKSTYLE_PATH" ]; then - echo "Checkstyle JAR file already exists at ${CHECKSTYLE_PATH}" + echo "Checkstyle JAR exists at ${CHECKSTYLE_PATH}, verifying integrity..." + verify_checksum "$CHECKSTYLE_PATH" "$CHECKSTYLE_SHA256" else - echo "Checkstyle JAR file not found. Downloading from ${CHECKSTYLE_URL}..." - curl -L -o "$CHECKSTYLE_PATH" "$CHECKSTYLE_URL" && echo "downloaded successfully" + echo "Downloading Checkstyle from ${CHECKSTYLE_URL}..." + curl --fail --silent --show-error -L -o "$CHECKSTYLE_PATH" "$CHECKSTYLE_URL" + verify_checksum "$CHECKSTYLE_PATH" "$CHECKSTYLE_SHA256" + echo "Downloaded and verified successfully." fi -# Add the lib directory to .gitignore if it's not already present +# ─── .gitignore handling ─── GITIGNORE_FILE=".gitignore" - -if ! grep -q "^${LIB_DIR}/$" "$GITIGNORE_FILE"; then - echo "Adding ${LIB_DIR}/ to ${GITIGNORE_FILE}" +if [ -f "$GITIGNORE_FILE" ] && ! grep -q "^${LIB_DIR}/$" "$GITIGNORE_FILE"; then echo "${LIB_DIR}/" >> "$GITIGNORE_FILE" -else - echo "${LIB_DIR}/ is already in ${GITIGNORE_FILE}" fi -# Check if there are any Java files in the project -JAVA_FILES_FOUND=$(find . -name "*.java") +# ─── Find and check Java files ─── +CHECKSTYLE_CONFIG="./conf/checkstyle/checkStyleAll.xml" -# if there is java file go ahead to trigger checkStyle for all java files -if [ -z "$JAVA_FILES_FOUND" ]; then - echo "No Java files found in the project." +# Use find with -print0 / xargs -0 to handle filenames with spaces +JAVA_COUNT=$(find . -name "*.java" | wc -l) +if [ "$JAVA_COUNT" -eq 0 ]; then + echo "No Java files found." exit 0 -else - echo "Java files found in the project. Now run further checks." fi -# Path to your Checkstyle configuration file -CHECKSTYLE_CONFIG="./conf/checkstyle/checkStyleAll.xml" - -# shellcheck disable=SC2027 -# shellcheck disable=SC2046 - -All_Java_Files=$(find . 
-name "*.java") - -# Run Checkstyle on all Java files in the repository -java -jar "$CHECKSTYLE_PATH" -c "$CHECKSTYLE_CONFIG" "$All_Java_Files" - -echo "finish ...." +echo "Found ${JAVA_COUNT} Java files, running Checkstyle..." -# Capture the exit code of Checkstyle -# shellcheck disable=SC2320 +# Capture exit code correctly (not after echo) +find . -name "*.java" -print0 | xargs -0 java -jar "$CHECKSTYLE_PATH" -c "$CHECKSTYLE_CONFIG" STATUS=$? -# Exit with the same status code as Checkstyle +echo "Checkstyle finished with exit code: ${STATUS}" exit $STATUS diff --git a/tools/stress_test/README.md b/tools/stress_test/README.md index db8e1b39..a254556d 100644 --- a/tools/stress_test/README.md +++ b/tools/stress_test/README.md @@ -3,6 +3,16 @@ The stress testing tool is designed to evaluate the performance of the `java-tro It can generate a large volume of transactions, store them locally, and broadcast them to the test network. Finally, it provides a TPS (Transactions Per Second) report as the stress test result. +### ⚠️ Security Notice + +The `stress.conf` file contains placeholder values for configuration purposes only. 
+ +**IMPORTANT:** +- Always replace the placeholder `privateKey` with your own test private key before running the tool +- Never commit your real private keys to version control +- Use test networks (Nile, Shasta) for stress testing, not mainnet with real funds +- Ensure your test private key has sufficient balance for the stress test (use DBFork tool if needed) + ### Build the stress test tool To build the stress test tool, we need to execute the following commands: ```shell script @@ -60,11 +70,11 @@ generateTx = { transferTrc20 = 30 } updateRefUrl = "127.0.0.1:50051" - // TRY18iTFy6p8yhWiCt1dhd2gz2c15ungq3 - privateKey = "aab926e86a17f0f46b4d22e61725edd5770a5b0fbdabb04b0f46ee499b1e34f2" + // IMPORTANT: Replace with your own private key + privateKey = "YOUR_PRIVATE_KEY_HERE_64_CHARACTERS_HEXADECIMAL_STRING_EXAMPLE" addressListFile = "/path/to/address-list.csv" trc10Id = 1000001 - trc20Address = "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t" + trc20Address = "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t" // Mainnet USDT - for reference only } ``` Here is the introduction for the configuration options: @@ -80,7 +90,7 @@ including `transfer`, `transferTrc10`, `transferTrc20`. The sum of all transacti `updateRefUrl`: configure the url which is used to update the `refBlockNum` and `refBlockNum` when generating the transactions; -`privateKey`: configure the private key used to sign the transactions; +`privateKey`: configure the private key used to sign the transactions. **IMPORTANT: Replace the placeholder with your own test private key. 
Never use keys with real funds.** `addressListFile`: configure the file path of receiver address list used to build the transactions; diff --git a/tools/stress_test/src/main/resources/stress.conf b/tools/stress_test/src/main/resources/stress.conf index e7c55d00..29f32394 100644 --- a/tools/stress_test/src/main/resources/stress.conf +++ b/tools/stress_test/src/main/resources/stress.conf @@ -16,11 +16,13 @@ generateTx = { transferTrc20 = 30 } updateRefUrl = "127.0.0.1:50051" - // TRY18iTFy6p8yhWiCt1dhd2gz2c15ungq3 - privateKey = "aab926e86a17f0f46b4d22e61725edd5770a5b0fbdabb04b0f46ee499b1e34f2" + // IMPORTANT: Replace with your own private key for testing + // Example format: 64-character hexadecimal string + // WARNING: Never use this key on mainnet or with real funds + privateKey = "YOUR_PRIVATE_KEY_HERE_64_CHARACTERS_HEXADECIMAL_STRING_EXAMPLE" addressListFile = "/path/to/address-list.csv" trc10Id = 1000001 - trc20Address = "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t" + trc20Address = "TR7NHqjeKQxGTCi8q8ZY4pL8otSzgjLj6t" // Mainnet USDT - for reference only } relayTx = { diff --git a/tools/trond/README.md b/tools/trond/README.md index 0802d61b..90ff01a0 100644 --- a/tools/trond/README.md +++ b/tools/trond/README.md @@ -99,6 +99,33 @@ Here is an example of stopping the node using `trond`: For more detailed usage instructions, refer to the help command or the [command documentation](./docs/trond.md). +## Security Configuration + +### SSH Host Key Verification + +When using `trond` for remote node deployment, SSH connections are made to remote servers. By default, host key verification is disabled for ease of testing and demonstration. + +For detailed security configuration and best practices, see [SECURITY.md](SECURITY.md). 
+ +**Quick Start for Production:** + +```bash +# Enable strict host key verification +export TROND_STRICT_HOST_KEY_CHECK=true + +# Add remote servers to known_hosts +ssh-keyscan -H remote-server-ip >> ~/.ssh/known_hosts + +# Then run trond commands +./trond node env-multi +./trond node run-multi start +``` + +For more information, refer to the security documentation for each command: +- [trond node env-multi](docs/trond_node_env-multi.md#security-configuration) +- [trond node run-multi](docs/trond_node_run-multi.md#security-configuration) +- [trond node run-multi stop](docs/trond_node_run-multi_stop.md#security-configuration) + ## TroubleShooting If you have any issues starting a java-tron node, please refer to the corresponding [TroubleShooting](../../single_node/README.md#troubleshot) guide. diff --git a/tools/trond/cmd/node/envMultiNodes.go b/tools/trond/cmd/node/envMultiNodes.go index 4f299f7b..b81ae437 100644 --- a/tools/trond/cmd/node/envMultiNodes.go +++ b/tools/trond/cmd/node/envMultiNodes.go @@ -24,9 +24,46 @@ var envMultiCmd = &cobra.Command{ - Configuration file for private network layout (Please refer to the example configuration file and rewrite it according to your needs) ./conf/private_net_layout.toml + + SECURITY CONFIGURATION: + + When deploying to remote nodes, SSH connections are used to transfer configuration files and manage the environment. By default, host key verification is disabled for ease of testing. + + For Production Environments: + + Enable strict host key checking to prevent man-in-the-middle attacks: + + # Enable strict host key verification + export TROND_STRICT_HOST_KEY_CHECK=true + + # Optional: Specify custom known_hosts file location + export TROND_KNOWN_HOSTS_FILE=/path/to/your/known_hosts + + # Then run the command + ./trond node env-multi + + Setting up known_hosts file: + + 1. First, manually SSH to each remote server to add it to known_hosts: + ssh user@remote-server-ip + + 2. 
Or use ssh-keyscan to add host keys: + ssh-keyscan -H remote-server-ip >> ~/.ssh/known_hosts + + For Testing/Development: + + Host key verification is disabled by default. You'll see a warning message: + ⚠️ WARNING: Host key verification is DISABLED (testing mode) + ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true + + For more details, see SECURITY.md `), Example: heredoc.Doc(` - # Check and configure node local environment + # Check and configure node local environment (testing mode) + $ ./trond node env-multi + + # Check and configure with strict host key verification (production mode) + $ export TROND_STRICT_HOST_KEY_CHECK=true $ ./trond node env-multi # Use the scp command to copy files and synchronize databases between multiple nodes: diff --git a/tools/trond/cmd/node/runMulti.go b/tools/trond/cmd/node/runMulti.go index bd75d421..b0596e8f 100644 --- a/tools/trond/cmd/node/runMulti.go +++ b/tools/trond/cmd/node/runMulti.go @@ -19,9 +19,46 @@ var runMultiCmd = &cobra.Command{ - Configuration file for private network layout (Please refer to the example configuration file and rewrite it according to your needs) ./conf/private_net_layout.toml + SECURITY CONFIGURATION: + + When deploying to remote nodes, SSH connections are used to start and manage docker-compose services. By default, host key verification is disabled for ease of testing. + + For Production Environments: + + Enable strict host key checking to prevent man-in-the-middle attacks: + + # Enable strict host key verification + export TROND_STRICT_HOST_KEY_CHECK=true + + # Optional: Specify custom known_hosts file location + export TROND_KNOWN_HOSTS_FILE=/path/to/your/known_hosts + + # Then run the command + ./trond node run-multi + + Setting up known_hosts file: + + 1. First, manually SSH to each remote server to add it to known_hosts: + ssh user@remote-server-ip + + 2. 
Or use ssh-keyscan to add host keys: + ssh-keyscan -H remote-server-ip >> ~/.ssh/known_hosts + + For Testing/Development: + + Host key verification is disabled by default. You'll see a warning message: + ⚠️ WARNING: Host key verification is DISABLED (testing mode) + ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true + + For more details, see SECURITY.md + `), Example: heredoc.Doc(` - # Run java-tron nodes according to ./conf/private_net_layout.toml + # Run java-tron nodes according to ./conf/private_net_layout.toml (testing mode) + $ ./trond node run-multi + + # Run with strict host key verification (production mode) + $ export TROND_STRICT_HOST_KEY_CHECK=true $ ./trond node run-multi `), Run: func(cmd *cobra.Command, args []string) { @@ -75,9 +112,38 @@ var runMultiStopCmd = &cobra.Command{ - Configuration file(by default, these exist in the current repository directory) ./conf/private_net_layout.toml + SECURITY CONFIGURATION: + + When stopping remote nodes, SSH connections are used to execute docker-compose commands. By default, host key verification is disabled for ease of testing. + + For Production Environments: + + Enable strict host key checking to prevent man-in-the-middle attacks: + + # Enable strict host key verification + export TROND_STRICT_HOST_KEY_CHECK=true + + # Optional: Specify custom known_hosts file location + export TROND_KNOWN_HOSTS_FILE=/path/to/your/known_hosts + + # Then run the command + ./trond node run-multi stop + + For Testing/Development: + + Host key verification is disabled by default. 
You'll see a warning message: + ⚠️ WARNING: Host key verification is DISABLED (testing mode) + ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true + + For more details, see SECURITY.md + `), Example: heredoc.Doc(` - # Stop multi java-tron node + # Stop multi java-tron node (testing mode) + $ ./trond node run-multi stop + + # Stop with strict host key verification (production mode) + $ export TROND_STRICT_HOST_KEY_CHECK=true $ ./trond node run-multi stop `), Run: func(cmd *cobra.Command, args []string) { diff --git a/tools/trond/docs/trond_node_env-multi.md b/tools/trond/docs/trond_node_env-multi.md index 42874a2f..750c7fcd 100644 --- a/tools/trond/docs/trond_node_env-multi.md +++ b/tools/trond/docs/trond_node_env-multi.md @@ -15,6 +15,39 @@ The following files are required: - Configuration file for private network layout (Please refer to the example configuration file and rewrite it according to your needs) ./conf/private_net_layout.toml +SECURITY CONFIGURATION: + +When deploying to remote nodes, SSH connections are used to transfer configuration files and manage the environment. By default, host key verification is disabled for ease of testing. + +For Production Environments: + +Enable strict host key checking to prevent man-in-the-middle attacks: + + # Enable strict host key verification + export TROND_STRICT_HOST_KEY_CHECK=true + + # Optional: Specify custom known_hosts file location + export TROND_KNOWN_HOSTS_FILE=/path/to/your/known_hosts + + # Then run the command + ./trond node env-multi + +Setting up known_hosts file: + +1. First, manually SSH to each remote server to add it to known_hosts: + ssh user@remote-server-ip + +2. Or use ssh-keyscan to add host keys: + ssh-keyscan -H remote-server-ip >> ~/.ssh/known_hosts + +For Testing/Development: + +Host key verification is disabled by default. 
You'll see a warning message: + ⚠️ WARNING: Host key verification is DISABLED (testing mode) + ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true + +For more details, see SECURITY.md + ``` trond node env-multi [flags] @@ -23,7 +56,11 @@ trond node env-multi [flags] ### Examples ``` -# Check and configure node local environment +# Check and configure node local environment (testing mode) +$ ./trond node env-multi + +# Check and configure with strict host key verification (production mode) +$ export TROND_STRICT_HOST_KEY_CHECK=true $ ./trond node env-multi # Use the scp command to copy files and synchronize databases between multiple nodes: diff --git a/tools/trond/docs/trond_node_run-multi.md b/tools/trond/docs/trond_node_run-multi.md index edb163d2..5c28316a 100644 --- a/tools/trond/docs/trond_node_run-multi.md +++ b/tools/trond/docs/trond_node_run-multi.md @@ -11,6 +11,39 @@ The following files are required: - Configuration file for private network layout (Please refer to the example configuration file and rewrite it according to your needs) ./conf/private_net_layout.toml +SECURITY CONFIGURATION: + +When deploying to remote nodes, SSH connections are used to start and manage docker-compose services. By default, host key verification is disabled for ease of testing. + +For Production Environments: + +Enable strict host key checking to prevent man-in-the-middle attacks: + + # Enable strict host key verification + export TROND_STRICT_HOST_KEY_CHECK=true + + # Optional: Specify custom known_hosts file location + export TROND_KNOWN_HOSTS_FILE=/path/to/your/known_hosts + + # Then run the command + ./trond node run-multi + +Setting up known_hosts file: + +1. First, manually SSH to each remote server to add it to known_hosts: + ssh user@remote-server-ip + +2. Or use ssh-keyscan to add host keys: + ssh-keyscan -H remote-server-ip >> ~/.ssh/known_hosts + +For Testing/Development: + +Host key verification is disabled by default. 
You'll see a warning message: + ⚠️ WARNING: Host key verification is DISABLED (testing mode) + ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true + +For more details, see SECURITY.md + ``` @@ -20,7 +53,11 @@ trond node run-multi [flags] ### Examples ``` -# Run java-tron nodes according to ./conf/private_net_layout.toml +# Run java-tron nodes according to ./conf/private_net_layout.toml (testing mode) +$ ./trond node run-multi + +# Run with strict host key verification (production mode) +$ export TROND_STRICT_HOST_KEY_CHECK=true $ ./trond node run-multi ``` diff --git a/tools/trond/docs/trond_node_run-multi_stop.md b/tools/trond/docs/trond_node_run-multi_stop.md index 275571d7..340a410d 100644 --- a/tools/trond/docs/trond_node_run-multi_stop.md +++ b/tools/trond/docs/trond_node_run-multi_stop.md @@ -9,6 +9,31 @@ The following configuration files are required: - Configuration file(by default, these exist in the current repository directory) ./conf/private_net_layout.toml +SECURITY CONFIGURATION: + +When stopping remote nodes, SSH connections are used to execute docker-compose commands. By default, host key verification is disabled for ease of testing. + +For Production Environments: + +Enable strict host key checking to prevent man-in-the-middle attacks: + + # Enable strict host key verification + export TROND_STRICT_HOST_KEY_CHECK=true + + # Optional: Specify custom known_hosts file location + export TROND_KNOWN_HOSTS_FILE=/path/to/your/known_hosts + + # Then run the command + ./trond node run-multi stop + +For Testing/Development: + +Host key verification is disabled by default. 
You'll see a warning message: + ⚠️ WARNING: Host key verification is DISABLED (testing mode) + ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true + +For more details, see SECURITY.md + ``` @@ -18,7 +43,11 @@ trond node run-multi stop [flags] ### Examples ``` -# Stop multi java-tron node +# Stop multi java-tron node (testing mode) +$ ./trond node run-multi stop + +# Stop with strict host key verification (production mode) +$ export TROND_STRICT_HOST_KEY_CHECK=true $ ./trond node run-multi stop ``` diff --git a/tools/trond/utils/http.go b/tools/trond/utils/http.go index a319cb06..41dbb8ea 100644 --- a/tools/trond/utils/http.go +++ b/tools/trond/utils/http.go @@ -21,6 +21,44 @@ import ( "golang.org/x/net/html" ) +// secureResolveWithinDir resolves a path through the real filesystem (following symlinks) +// and verifies the result stays within the allowed directory. +// This is the single, authoritative boundary check used by the tar extractor. +func secureResolveWithinDir(destDir, targetPath string) error { + // Resolve destDir itself to an absolute, symlink-free path + absDestDir, err := filepath.Abs(destDir) + if err != nil { + return fmt.Errorf("failed to resolve destination directory: %v", err) + } + // EvalSymlinks on destDir (which must already exist) + realDestDir, err := filepath.EvalSymlinks(absDestDir) + if err != nil { + return fmt.Errorf("failed to resolve destination directory symlinks: %v", err) + } + + // Resolve the target through the real filesystem. + // Walk up to the deepest existing ancestor, then validate the rest lexically. + realTarget, err := filepath.EvalSymlinks(targetPath) + if err != nil { + // If the full path doesn't exist yet, resolve the parent (which should exist + // because we create dirs in order) and append the base name. 
+ parentDir := filepath.Dir(targetPath) + realParent, err2 := filepath.EvalSymlinks(parentDir) + if err2 != nil { + return fmt.Errorf("failed to resolve parent path %q: %v", parentDir, err2) + } + realTarget = filepath.Join(realParent, filepath.Base(targetPath)) + } + + realTarget = filepath.Clean(realTarget) + prefix := realDestDir + string(os.PathSeparator) + + if realTarget != realDestDir && !strings.HasPrefix(realTarget, prefix) { + return fmt.Errorf("path %q resolves to %q which is outside destination %q", targetPath, realTarget, realDestDir) + } + return nil +} + // Function to fetch and parse HTML, extracting absolute links func fetchAndExtractLinks(webURL string) ([]string, error) { resp, err := http.Get(webURL) @@ -460,75 +498,121 @@ func ExtractTgzWithStatus(tgzFile, destDir string) error { if err := os.MkdirAll(target, os.FileMode(header.Mode)); err != nil { return fmt.Errorf("failed to create directory: %v", err) } + // Verify the created directory resolves within destDir + if err := secureResolveWithinDir(destDir, target); err != nil { + return fmt.Errorf("directory traversal detected in dir entry %q: %v", header.Name, err) + } + case tar.TypeReg: // Ensure parent directory exists if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { return fmt.Errorf("failed to create parent directory: %v", err) } - // Create file - outFile, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY, os.FileMode(header.Mode)) + // Verify the target path resolves within destDir BEFORE creating the file. + // This catches traversal via previously extracted symlinks in the path. 
+ if err := secureResolveWithinDir(destDir, target); err != nil { + return fmt.Errorf("directory traversal detected in file entry %q: %v", header.Name, err) + } + + // Create file with explicit O_NOFOLLOW-like behavior: + // use Lstat to check the target doesn't already exist as a symlink + if info, err := os.Lstat(target); err == nil && info.Mode()&os.ModeSymlink != 0 { + return fmt.Errorf("refusing to write through existing symlink: %s", header.Name) + } + + outFile, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode)) if err != nil { return fmt.Errorf("failed to create file: %v", err) } - defer outFile.Close() // Read data in chunks and write to the output file buf := make([]byte, 32*1024) // 32KB buffer for reading for { - n, err := tr.Read(buf) + n, readErr := tr.Read(buf) if n > 0 { - // Write the data to the file if _, writeErr := outFile.Write(buf[:n]); writeErr != nil { + outFile.Close() return fmt.Errorf("failed to write file: %v", writeErr) } - - // Update the total extracted size and file count totalExtractedSize += int64(n) } - if err == io.EOF { + if readErr == io.EOF { break } - if err != nil { - return fmt.Errorf("error reading file: %v", err) + if readErr != nil { + outFile.Close() + return fmt.Errorf("error reading file: %v", readErr) } } + outFile.Close() - // Increment the file count totalFilesExtracted++ + case tar.TypeSymlink: - // Resolve the symlink target - resolvedLinkname, err := filepath.EvalSymlinks(filepath.Join(filepath.Dir(target), header.Linkname)) + // Validate: the symlink TARGET must resolve within destDir. + // Compute what the symlink would point to. + linkTarget := filepath.Join(filepath.Dir(target), header.Linkname) + linkTarget = filepath.Clean(linkTarget) + + // Validate the resolved link target stays within destDir. + // Use lexical check first (works even if target doesn't exist yet). 
+ absDestDir, err := filepath.Abs(destDir) + if err != nil { + return fmt.Errorf("failed to resolve destDir: %v", err) + } + absLinkTarget, err := filepath.Abs(linkTarget) if err != nil { return fmt.Errorf("failed to resolve symlink target: %v", err) } - // Sanitize symlink target to prevent directory traversal - if !strings.HasPrefix(filepath.Clean(resolvedLinkname), filepath.Clean(destDir)+string(os.PathSeparator)) { - return fmt.Errorf("invalid symlink target in archive: %s -> %s", header.Name, header.Linkname) + if absLinkTarget != absDestDir && !strings.HasPrefix(absLinkTarget, absDestDir+string(os.PathSeparator)) { + return fmt.Errorf("symlink %q -> %q escapes destination directory (resolves to %q)", header.Name, header.Linkname, absLinkTarget) + } + + // If the parent directory of the symlink target exists on the filesystem, + // also verify through EvalSymlinks (catches multi-hop symlink chains). + if _, statErr := os.Lstat(filepath.Dir(linkTarget)); statErr == nil { + if err := secureResolveWithinDir(destDir, linkTarget); err != nil { + return fmt.Errorf("symlink chain traversal detected: %q -> %q: %v", header.Name, header.Linkname, err) + } + } + + // Also verify the symlink itself will be placed within destDir + if err := secureResolveWithinDir(destDir, filepath.Dir(target)); err != nil { + return fmt.Errorf("symlink location traversal detected: %q: %v", header.Name, err) } - // Create symlink - if err := os.Symlink(resolvedLinkname, target); err != nil { + // Remove any existing entry at target to prevent symlink-following attacks + os.Remove(target) + + // Create symlink using the ORIGINAL relative linkname (not an absolute resolved path) + if err := os.Symlink(header.Linkname, target); err != nil { return fmt.Errorf("failed to create symlink: %v", err) } + case tar.TypeLink: - // Create hard link + // Hard link: both source and target must resolve within destDir linkTarget := filepath.Join(destDir, header.Linkname) - // Sanitize the link target to 
prevent directory traversal - // Resolve the hard link target + + // Verify the hard link target resolves within destDir + if err := secureResolveWithinDir(destDir, linkTarget); err != nil { + return fmt.Errorf("hard link target traversal: %q -> %q: %v", header.Name, header.Linkname, err) + } + // Verify the hard link location resolves within destDir + if err := secureResolveWithinDir(destDir, filepath.Dir(target)); err != nil { + return fmt.Errorf("hard link location traversal: %q: %v", header.Name, err) + } + + // Use the RESOLVED target for creating the hard link (consistent with validation) resolvedLinkTarget, err := filepath.EvalSymlinks(linkTarget) if err != nil { return fmt.Errorf("failed to resolve hard link target: %v", err) } - // Ensure the resolved hard link target stays within the destination directory - if !strings.HasPrefix(filepath.Clean(resolvedLinkTarget), filepath.Clean(destDir)+string(os.PathSeparator)) { - return fmt.Errorf("attempted directory traversal in hard link: %s -> %s", header.Name, header.Linkname) - } - - if err := os.Link(linkTarget, target); err != nil { + if err := os.Link(resolvedLinkTarget, target); err != nil { return fmt.Errorf("failed to create hard link: %v", err) } + default: fmt.Printf("Skipping unknown file type: %v\n", header.Typeflag) } diff --git a/tools/trond/utils/ssh.go b/tools/trond/utils/ssh.go index b36bfc5c..3a96bf51 100644 --- a/tools/trond/utils/ssh.go +++ b/tools/trond/utils/ssh.go @@ -6,13 +6,86 @@ import ( "net" "os" "path/filepath" + "regexp" + "strings" "time" "github.com/schollz/progressbar/v3" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" + "golang.org/x/crypto/ssh/knownhosts" ) +// validPathPattern allows only safe characters in remote paths: +// alphanumeric, slash, dot, hyphen, underscore +var validPathPattern = regexp.MustCompile(`^[a-zA-Z0-9/._-]+$`) + +// validateRemotePath checks that a path contains no shell-injectable characters. 
+// This is the primary defense against command injection via user-controlled paths. +func validateRemotePath(path string) error { + if path == "" { + return fmt.Errorf("path cannot be empty") + } + if !validPathPattern.MatchString(path) { + return fmt.Errorf("invalid characters in remote path %q: only alphanumeric, '/', '.', '-', '_' are allowed", path) + } + // Block path traversal attempts + if strings.Contains(path, "..") { + return fmt.Errorf("path traversal ('..') is not allowed in remote path: %s", path) + } + return nil +} + +// validateSCPFileName validates a filename for SCP transfer. +// SCP protocol is sensitive to special characters that could break the control line format. +// The control line format is: C \n +// Filenames with newlines, carriage returns, or other control characters can break this format. +func validateSCPFileName(fileName string) error { + if fileName == "" { + return fmt.Errorf("filename cannot be empty") + } + + // Check for path traversal attempts + if strings.Contains(fileName, "/") || strings.Contains(fileName, "\\") { + return fmt.Errorf("filename cannot contain path separators: %s", fileName) + } + + // Block path traversal attempts + if strings.Contains(fileName, "..") { + return fmt.Errorf("filename cannot contain path traversal sequences: %s", fileName) + } + + // Check for control characters that could break SCP protocol + // SCP control line ends with \n, so any \n or \r in filename breaks the protocol + for _, ch := range fileName { + // Block control characters (0x00-0x1F) and DEL (0x7F) + if ch < 0x20 || ch == 0x7F { + return fmt.Errorf("filename contains invalid control character (0x%02X): %s", ch, fileName) + } + // Block characters that have special meaning in SCP protocol or shell + // While we use proper escaping, defense-in-depth means rejecting these outright + if strings.ContainsRune("*?[]{}()<>|;&$`\\\"'", ch) { + return fmt.Errorf("filename contains shell-special character %q: %s", ch, fileName) + } + } + + 
// Ensure filename is not too long (common filesystem NAME_MAX limit; SCP itself imposes no fixed cap)
Please create it or disable strict host key checking for testing", knownHostsPath) + } + + callback, err := knownhosts.New(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("failed to load known_hosts file: %v", err) + } + + fmt.Println(" ✓ Using strict host key verification (production mode)") + return callback, nil + } + + // Development/Testing mode: Accept any host key with warning + fmt.Println(" ⚠️ WARNING: Host key verification is DISABLED (testing mode)") + fmt.Println(" ⚠️ For production use, set TROND_STRICT_HOST_KEY_CHECK=true") + return ssh.InsecureIgnoreHostKey(), nil +} + func SSHConnect(ip string, port int, user, password, keyPath string) (*ssh.Client, error) { + // Get appropriate host key callback + hostKeyCallback, err := getHostKeyCallback() + if err != nil { + return nil, fmt.Errorf("failed to configure host key verification: %v", err) + } + sshConfig := &ssh.ClientConfig{ User: user, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Replace with known hosts verification in production + HostKeyCallback: hostKeyCallback, Timeout: 5 * time.Second, } @@ -139,14 +256,26 @@ func SCPFile(ip string, port int, user, password, keyPath, localPath, remotePath } defer stdin.Close() - // Start SCP command on the remote server - scpCmd := fmt.Sprintf("scp -t %s", filepath.Dir(remotePath)) + // Validate remote path before constructing shell command + remotePathDir := filepath.Dir(remotePath) + if err := validateRemotePath(remotePathDir); err != nil { + return fmt.Errorf("unsafe remote path for SCP: %v", err) + } + + // Start SCP command on the remote server (quoted to prevent injection) + scpCmd := fmt.Sprintf("scp -t %s", shellQuote(remotePathDir)) if err := session.Start(scpCmd); err != nil { return fmt.Errorf("failed to start SCP command: %v", err) } - // Send SCP file metadata - _, err = fmt.Fprintf(stdin, "C0644 %d %s\n", fileSize, filepath.Base(remotePath)) + // Validate and sanitize the filename + fileName := filepath.Base(remotePath) + if err 
:= validateSCPFileName(fileName); err != nil { + return fmt.Errorf("unsafe filename for SCP: %v", err) + } + + // Send SCP file metadata with properly escaped filename + _, err = fmt.Fprintf(stdin, "C0644 %d %s\n", fileSize, fileName) if err != nil { return fmt.Errorf("failed to send file metadata: %v", err) } @@ -195,8 +324,13 @@ func SSHMkdirIfNotExist(ip string, port int, user, password, keyPath, remoteDir } defer session.Close() - // Check if the directory exists - checkCmd := fmt.Sprintf("[ -d \"%s\" ] && echo \"exists\" || echo \"not exists\"", remoteDir) + // Validate remote directory path before constructing shell commands + if err := validateRemotePath(remoteDir); err != nil { + return fmt.Errorf("unsafe remote directory path: %v", err) + } + + // Check if the directory exists (quoted to prevent injection) + checkCmd := fmt.Sprintf("[ -d %s ] && echo \"exists\" || echo \"not exists\"", shellQuote(remoteDir)) output, err := session.CombinedOutput(checkCmd) if err != nil { return fmt.Errorf("failed to check directory existence: %v", err) @@ -215,8 +349,8 @@ func SSHMkdirIfNotExist(ip string, port int, user, password, keyPath, remoteDir } defer session.Close() - // Execute mkdir command remotely - cmd := fmt.Sprintf("mkdir -p %s", remoteDir) + // Execute mkdir command remotely (path already validated above) + cmd := fmt.Sprintf("mkdir -p %s", shellQuote(remoteDir)) if err := session.Run(cmd); err != nil { return fmt.Errorf("failed to create directory: %v", err) } @@ -241,10 +375,15 @@ func RunRemoteCompose(ip string, port int, user, password, keyPath, composePath } defer session.Close() - // Construct the docker-compose command - cmd := fmt.Sprintf("docker-compose -f %s up -d", composePath) + // Validate compose file path before constructing shell command + if err := validateRemotePath(composePath); err != nil { + return fmt.Errorf("unsafe docker-compose path: %v", err) + } + + // Construct the docker-compose command (quoted to prevent injection) + cmd := 
fmt.Sprintf("docker-compose -f %s up -d", shellQuote(composePath)) if down { - cmd = fmt.Sprintf("docker-compose -f %s down", composePath) + cmd = fmt.Sprintf("docker-compose -f %s down", shellQuote(composePath)) } // Run the command remotely