Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 5 additions & 10 deletions examples/cpp/AlexNet/alexnet.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,15 +28,6 @@ using FlexFlow::Tensor;

LegionRuntime::Logger::Category log_app("AlexNet");

// Scans argv for AlexNet-specific flags and stores them into `config`.
// Recognized flags:
//   --dataset <path> : copied into config.dataset_path
// Unrecognized arguments are ignored (they may belong to the runtime).
// A trailing flag with no value is ignored instead of reading past argv.
void parse_input_args(char **argv, int argc, AlexNetConfig &config) {
  for (int i = 1; i < argc; i++) {
    if (!strcmp(argv[i], "--dataset")) {
      if (i + 1 >= argc) {
        break; // flag given without a value; nothing to copy
      }
      // NOTE(review): strcpy assumes dataset_path is large enough for the
      // argument -- confirm the buffer size declared in AlexNetConfig.
      std::strcpy(config.dataset_path, argv[++i]);
      continue;
    }
  }
}

void FlexFlow::top_level_task(Task const *task,
std::vector<PhysicalRegion> const &regions,
Context ctx,
Expand All @@ -47,7 +38,11 @@ void FlexFlow::top_level_task(Task const *task,
InputArgs const &command_args = HighLevelRuntime::get_input_args();
char **argv = command_args.argv;
int argc = command_args.argc;
parse_input_args(argv, argc, alexnetConfig);
ArgsParser args;
args.add_argument("--dataset", std::string(""), "Path to the dataset file");
args.parse_args(argc, argv);
std::strcpy(alexnetConfig.dataset_path,
args.get<std::string>("--dataset").data());
log_app.print("batchSize(%d) workersPerNodes(%d) numNodes(%d)",
ffConfig.batchSize,
ffConfig.workersPerNode,
Expand Down
1 change: 1 addition & 0 deletions examples/cpp/AlexNet/alexnet.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
*/

#include "flexflow/model.h"
#include "utils/parse.h" // Note(lambda):this may have problems
#define MAX_NUM_SAMPLES 4196

using namespace Legion;
Expand Down
106 changes: 39 additions & 67 deletions examples/cpp/DLRM/dlrm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,13 @@

#include "dlrm.h"
#include "hdf5.h"
#include "utils/parse.h" //Note(lambda): this headfile may be false,
#include <sstream>

using namespace Legion;

LegionRuntime::Logger::Category log_app("DLRM");

void parse_input_args(char **argv, int argc, DLRMConfig &apConfig);

DLRMConfig::DLRMConfig(void)
: sparse_feature_size(64), sigmoid_bot(-1), sigmoid_top(-1),
embedding_bag_size(1), loss_threshold(0.0f), arch_interaction_op("cat"),
Expand All @@ -41,6 +40,16 @@ DLRMConfig::DLRMConfig(void)
mlp_top.push_back(2);
}

// Splits a '-'-separated list such as "13-512-256" into its integer parts.
// Takes the input by const reference so rvalues (e.g. the temporary returned
// by args.get<std::string>(...) at the call sites) bind correctly; the
// original non-const reference could not.
// Empty tokens (from stray delimiters like "32--64") are skipped rather than
// passed to std::stoi, which would throw std::invalid_argument.
std::vector<int> parse_string(std::string const &inputs) {
  std::vector<int> result;
  std::stringstream ss(inputs);
  std::string word;
  while (std::getline(ss, word, '-')) {
    if (!word.empty()) {
      result.push_back(std::stoi(word));
    }
  }
  return result;
}

Tensor create_mlp(FFModel *model,
Tensor const &input,
std::vector<int> ln,
Expand Down Expand Up @@ -122,7 +131,34 @@ void FlexFlow::top_level_task(Task const *task,
InputArgs const &command_args = HighLevelRuntime::get_input_args();
char **argv = command_args.argv;
int argc = command_args.argc;
parse_input_args(argv, argc, dlrmConfig);
ArgsParser args; // Note(lambda): this is a class in utils/parser.h
args.add_argument("--arch-sparse-feature-size", 64, "sparse feature size");
args.add_argument(
"--arch-embedding-size", "32-64-96-128", "embedding size");
args.add_argument("--embedding-bag-size", 1, "embedding bag size");
args.add_argument("--arch-mlp-bot", "13-512-256-64-16", "mlp bot");
args.add_argument("--arch-mlp-top", "512-256-1", "mlp top");
args.add_argument("--loss-threshold", 0.0f, "loss threshold");
args.add_argument("--sigmoid-top", -1, "sigmoid top");
args.add_argument("--sigmoid-bot", -1, "sigmoid bot");
args.add_argument("--arch-interaction-op", "cat", "interaction op");
args.add_argument("--dataset", "", "dataset path");
args.add_argument("--data-size", -1, "data size");
args.parse_args(argc, argv);
dlrmConfig.sparse_feature_size =
args.get<int>("--arch-sparse-feature-size");
dlrmConfig.embedding_size =
parse_string(args.get<std::string>("--arch-embedding-size"));
dlrmConfig.embedding_bag_size = args.get<int>("--embedding-bag-size");
dlrmConfig.mlp_bot = parse_string(args.get<std::string>("--arch-mlp-bot"));
dlrmConfig.mlp_top = parse_string(args.get<std::string>("--arch-mlp-top"));
dlrmConfig.loss_threshold = args.get<float>("--loss-threshold");
dlrmConfig.sigmoid_top = args.get<int>("--sigmoid-top");
dlrmConfig.sigmoid_bot = args.get<int>("--sigmoid-bot");
dlrmConfig.arch_interaction_op =
args.get<std::string>("--arch-interaction-op");
dlrmConfig.dataset_path = args.get<std::string>("--dataset");
dlrmConfig.data_size = args.get<int>("--data-size");
log_app.print("batchSize(%d) workersPerNodes(%d) numNodes(%d)",
ffConfig.batchSize,
ffConfig.workersPerNode,
Expand Down Expand Up @@ -240,70 +276,6 @@ void FlexFlow::top_level_task(Task const *task,
data_loader.num_samples * ffConfig.epochs / run_time);
}

// Scans argv for DLRM-specific flags and stores them into `config`.
// List-valued flags (--arch-embedding-size, --arch-mlp-bot, --arch-mlp-top)
// take a '-'-separated list of ints such as "13-512-256".
// Unrecognized arguments are ignored (they may belong to the runtime).
// A trailing flag with no value is ignored instead of reading past argv
// (the original did unchecked argv[++i] for every flag).
void parse_input_args(char **argv, int argc, DLRMConfig &config) {
  // Replaces `dest` with the ints parsed from a '-'-separated list.
  // Deduplicates the three identical splitting loops of the original.
  auto parse_int_list = [](char const *arg, std::vector<int> &dest) {
    std::stringstream ss((std::string(arg)));
    std::string word;
    dest.clear();
    while (std::getline(ss, word, '-')) {
      dest.push_back(std::stoi(word));
    }
  };
  for (int i = 1; i < argc; i++) {
    if (!strcmp(argv[i], "--arch-sparse-feature-size")) {
      if (i + 1 >= argc) break; // flag without a value
      config.sparse_feature_size = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--arch-embedding-size")) {
      if (i + 1 >= argc) break;
      parse_int_list(argv[++i], config.embedding_size);
      continue;
    }
    if (!strcmp(argv[i], "--embedding-bag-size")) {
      if (i + 1 >= argc) break;
      config.embedding_bag_size = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--arch-mlp-bot")) {
      if (i + 1 >= argc) break;
      parse_int_list(argv[++i], config.mlp_bot);
      continue;
    }
    if (!strcmp(argv[i], "--arch-mlp-top")) {
      if (i + 1 >= argc) break;
      parse_int_list(argv[++i], config.mlp_top);
      continue;
    }
    if (!strcmp(argv[i], "--loss-threshold")) {
      if (i + 1 >= argc) break;
      config.loss_threshold = atof(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--sigmoid-top")) {
      if (i + 1 >= argc) break;
      config.sigmoid_top = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--sigmoid-bot")) {
      if (i + 1 >= argc) break;
      config.sigmoid_bot = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--arch-interaction-op")) {
      if (i + 1 >= argc) break;
      config.arch_interaction_op = std::string(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--dataset")) {
      if (i + 1 >= argc) break;
      config.dataset_path = std::string(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--data-size")) {
      if (i + 1 >= argc) break;
      config.data_size = atoi(argv[++i]);
      continue;
    }
  }
}

DataLoader::DataLoader(FFModel &ff,
DLRMConfig const &dlrm,
std::vector<Tensor> const &_sparse_inputs,
Expand Down
32 changes: 10 additions & 22 deletions examples/cpp/DLRM/strategies/dlrm_strategy.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
--------------------------*/

#include "strategy.pb.h"
#include "utils/parse.h"
#include <fstream>
#include <iostream>

Expand Down Expand Up @@ -218,31 +219,18 @@ void FFStrategy::export_file(std::string const &output) {
strategy.SerializeToOstream(&outputFile);
}

// Scans argv for strategy-generator flags.
//   --gpu <n>  : GPUs per node      -> gpus_per_node
//   --node <n> : number of nodes    -> num_nodes
//   --emb <n>  : embeddings per node-> embs_per_node
// Outputs are only written when the flag is present, so callers' defaults
// survive. A trailing flag with no value is ignored instead of reading past
// argv (the original did unchecked argv[++i]).
void parse_input_args(char **argv,
                      int argc,
                      int &gpus_per_node,
                      int &embs_per_node,
                      int &num_nodes) {
  for (int i = 1; i < argc; i++) {
    if (!strcmp(argv[i], "--gpu")) {
      if (i + 1 >= argc) break; // flag without a value
      gpus_per_node = std::atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--node")) {
      if (i + 1 >= argc) break;
      num_nodes = std::atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--emb")) {
      if (i + 1 >= argc) break;
      embs_per_node = std::atoi(argv[++i]);
      continue;
    }
  }
}

int main(int argc, char **argv) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
int gpus_per_node = 0, embs_per_node = 0, num_nodes = 0;
parse_input_args(argv, argc, gpus_per_node, embs_per_node, num_nodes);
ArgsParser args;
args.add_argument("--gpu", 4, "Number of GPUs Per Node");
args.add_argument("--node", 1, "Number of Nodes");
args.add_argument("--emb", 4, "Number of Embeddings Per Node");
args.parse_args(argc, argv);
gpus_per_node = args.get<int>("--gpu");
embs_per_node = args.get<int>("--emb");
num_nodes = args.get<int>("--node");

printf("Number of GPUs Per Node = %d\n", gpus_per_node);
printf("Number of Nodes = %d\n", num_nodes);
printf("Number of Embeddings Per Node = %d\n", embs_per_node);
Expand Down
16 changes: 6 additions & 10 deletions examples/cpp/ResNet/resnet.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
*/

#include "resnet.h"
#include "utils/parse.h"
#include <fstream>
#include <sstream>
#include <string>
Expand All @@ -26,15 +27,6 @@ using FlexFlow::Tensor;

LegionRuntime::Logger::Category log_app("ResNet");

// Scans argv for ResNet-specific flags and stores them into `config`.
//   --dataset <path> : stored into config.dataset_path
// Unrecognized arguments are ignored (they may belong to the runtime).
// A trailing flag with no value is ignored instead of reading past argv.
void parse_input_args(char **argv, int argc, ResNetConfig &config) {
  for (int i = 1; i < argc; i++) {
    if (!strcmp(argv[i], "--dataset")) {
      if (i + 1 >= argc) {
        break; // flag given without a value
      }
      config.dataset_path = std::string(argv[++i]);
      continue;
    }
  }
}

Tensor
BottleneckBlock(FFModel &ff, Tensor input, int out_channels, int stride) {
Tensor t = ff.conv2d(input, out_channels, 1, 1, 1, 1, 0, 0, AC_MODE_NONE);
Expand Down Expand Up @@ -68,7 +60,11 @@ void FlexFlow::top_level_task(Task const *task,
InputArgs const &command_args = HighLevelRuntime::get_input_args();
char **argv = command_args.argv;
int argc = command_args.argc;
parse_input_args(argv, argc, resnetConfig);
ArgsParser args;
args.add_argument("--dataset", std::string(""), "Path to the dataset file");
args.parse_args(argc, argv);
resnetConfig.dataset_path = args.get<std::string>("--dataset");

log_app.print("batchSize(%d) workersPerNodes(%d) numNodes(%d)",
ffConfig.batchSize,
ffConfig.workersPerNode,
Expand Down
41 changes: 14 additions & 27 deletions examples/cpp/Transformer/transformer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@
#include "transformer.h"

using namespace Legion;

LegionRuntime::Logger::Category log_app("Transformer");
using namespace FlexFlow::ArgsParser;

Tensor create_emb(FFModel *model,
Tensor const &input,
Expand Down Expand Up @@ -84,31 +84,6 @@ TransformerConfig::TransformerConfig(void) {
sequence_length = 512;
}

// Scans argv for Transformer-specific flags and stores them into `config`.
// Fields are only overwritten when the flag is present, so the defaults set
// by the TransformerConfig constructor survive. Unrecognized arguments are
// ignored (they may belong to the runtime). A trailing flag with no value is
// ignored instead of reading past argv (the original did unchecked
// argv[++i] for every flag).
void parse_input_args(char **argv, int argc, TransformerConfig &config) {
  for (int i = 1; i < argc; i++) {
    if (!strcmp(argv[i], "--num-layers")) {
      if (i + 1 >= argc) break; // flag without a value
      config.num_layers = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--embedding-size")) {
      if (i + 1 >= argc) break;
      config.embedding_size = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--hidden-size")) {
      if (i + 1 >= argc) break;
      config.hidden_size = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--num-heads")) {
      if (i + 1 >= argc) break;
      config.num_heads = atoi(argv[++i]);
      continue;
    }
    if (!strcmp(argv[i], "--sequence-length")) {
      if (i + 1 >= argc) break;
      config.sequence_length = atoi(argv[++i]);
      continue;
    }
  }
}

void FlexFlow::top_level_task(Task const *task,
std::vector<PhysicalRegion> const &regions,
Context ctx,
Expand All @@ -119,7 +94,19 @@ void FlexFlow::top_level_task(Task const *task,
InputArgs const &command_args = HighLevelRuntime::get_input_args();
char **argv = command_args.argv;
int argc = command_args.argc;
parse_input_args(argv, argc, tfConfig);
ArgsParser args;
args.add_argument("--num-layers", 4, "number of layers");
args.add_argument("--embedding-size", 4, "embedding size");
args.add_argument("--hidden-size", 4, "hidden size");
args.add_argument("--num-heads", 4, "number of heads");
args.add_argument("--sequence-length", 512, "sequence length");
args.parse_args(argc, argv);
tfConfig.num_layers = args.get<int>("--num-layers");
tfConfig.embedding_size = args.get<int>("--embedding-size");
tfConfig.hidden_size = args.get<int>("--hidden-size");
tfConfig.num_heads = args.get<int>("--num-heads");
tfConfig.sequence_length = args.get<int>("--sequence-length");

log_app.print("batchSize(%d) workersPerNodes(%d) numNodes(%d)",
ffConfig.batchSize,
ffConfig.workersPerNode,
Expand Down
Loading