Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
70 commits
Select commit Hold shift + click to select a range
c36d42c
try to implement the get_output_shape in attention.cc
lambda7xx Oct 5, 2023
959c50f
leave the implementation
lambda7xx Oct 7, 2023
be7e04f
implement the get output_shape for batch_matmul
lambda7xx Oct 8, 2023
f64bd64
implement the batch_norm
lambda7xx Oct 8, 2023
30d4255
implement the cast
lambda7xx Oct 8, 2023
1e5d742
implement the combine
lambda7xx Oct 8, 2023
5f4cf5c
add concat
lambda7xx Oct 8, 2023
ba8386f
try to implement the conv2d
lambda7xx Oct 8, 2023
5efd4ff
add conv_2d
lambda7xx Oct 8, 2023
fbdb407
add dropout
lambda7xx Oct 8, 2023
32aa332
add element binary
lambda7xx Oct 8, 2023
9a85d59
add element unary
lambda7xx Oct 8, 2023
698a729
add embedding
lambda7xx Oct 8, 2023
72f43bc
add flat
lambda7xx Oct 8, 2023
e0f05be
leave the get_output_shape for gather
lambda7xx Oct 9, 2023
1d18f35
skip groupby
lambda7xx Oct 9, 2023
c81d5f8
add layer norm
lambda7xx Oct 9, 2023
d721da7
add linear
lambda7xx Oct 9, 2023
23a266e
add pool2d
lambda7xx Oct 9, 2023
ee9bbaa
leave the reduce
lambda7xx Oct 9, 2023
5e354ba
add reduction
lambda7xx Oct 9, 2023
e95f195
add reshape
lambda7xx Oct 9, 2023
f053a20
add reverse draft
lambda7xx Oct 9, 2023
6f6f61e
add layer norm valid check
lambda7xx Oct 9, 2023
d47198e
add replicate
lambda7xx Oct 11, 2023
ea0297e
add softmax
lambda7xx Oct 11, 2023
125a9ad
add split
lambda7xx Oct 11, 2023
61e09c6
add topk
lambda7xx Oct 11, 2023
f3d6524
add transpose
lambda7xx Oct 11, 2023
590dac5
update the batch matmul
lambda7xx Oct 11, 2023
69c13ba
add valid check for conv_2d
lambda7xx Oct 11, 2023
b56d9c0
format the code and get_output_shape draft version0.1
lambda7xx Oct 11, 2023
1f8d85d
leave attention to implement
lambda7xx Oct 17, 2023
c962564
add batch_matmul
lambda7xx Oct 17, 2023
ab7efc8
add batch_matmul
lambda7xx Oct 17, 2023
8c7395d
add batch norm
lambda7xx Oct 17, 2023
1f1703c
refine the batch_matmul
lambda7xx Oct 17, 2023
9406a0b
refine the batch_matmul
lambda7xx Oct 17, 2023
9a84d50
refine the conv2d
lambda7xx Oct 17, 2023
3208f5b
delete the invalid
lambda7xx Oct 17, 2023
8a6b29e
add gather
lambda7xx Oct 17, 2023
a8c75ec
add groupby
lambda7xx Oct 17, 2023
bb615bd
implement the layer_norm
lambda7xx Oct 17, 2023
bc823f4
add linear
lambda7xx Oct 17, 2023
f2a50e3
add pool2d
lambda7xx Oct 17, 2023
2408b37
add repartition
lambda7xx Oct 17, 2023
9c93f07
remove aggregate
lambda7xx Oct 17, 2023
7377bee
remove aggregate
lambda7xx Oct 17, 2023
a5f1a0e
add reshape
lambda7xx Oct 17, 2023
812708f
conv2d done
lambda7xx Oct 17, 2023
2bdaf09
add more shape
lambda7xx Oct 18, 2023
50ec41f
try to implement the attention
lambda7xx Oct 18, 2023
2e2533a
leave reduce
lambda7xx Oct 18, 2023
985eeac
fix the concat
lambda7xx Oct 18, 2023
aff7b00
fix some error
lambda7xx Oct 18, 2023
e59975e
remove the empty blank
lambda7xx Oct 18, 2023
7da6b50
some update
lambda7xx Oct 19, 2023
40ffcd9
add new batch matmul
lambda7xx Oct 27, 2023
f68c83b
add concat
lambda7xx Oct 27, 2023
5ef2497
add conv_2d
lambda7xx Oct 27, 2023
7e23eb3
add element binary
lambda7xx Oct 27, 2023
9bb4de6
add embedding
lambda7xx Oct 28, 2023
51a9cb7
add flat
lambda7xx Oct 28, 2023
cdb38d0
add pool2d
lambda7xx Oct 28, 2023
9f49b95
add reduce
lambda7xx Oct 28, 2023
a715cdf
implement the reshape
lambda7xx Oct 28, 2023
326f7f3
implement the split
lambda7xx Oct 28, 2023
6c82466
add transpose
lambda7xx Oct 28, 2023
ce7ba69
leave the attention
lambda7xx Oct 28, 2023
fec4928
add attention
lambda7xx Oct 28, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion deps/fmt
Submodule fmt updated 70 files
+0 −8 .github/dependabot.yml
+2 −3 .github/pull_request_template.md
+0 −30 .github/workflows/cifuzz.yml
+1 −9 .github/workflows/doc.yml
+13 −30 .github/workflows/linux.yml
+2 −17 .github/workflows/macos.yml
+0 −65 .github/workflows/scorecard.yml
+4 −12 .github/workflows/windows.yml
+41 −98 CMakeLists.txt
+1 −668 ChangeLog.rst
+1 −1 LICENSE.rst
+28 −42 README.rst
+145 −200 doc/api.rst
+6 −5 doc/build.py
+8 −134 doc/syntax.rst
+1 −1 include/fmt/args.h
+327 −466 include/fmt/chrono.h
+37 −18 include/fmt/color.h
+104 −27 include/fmt/compile.h
+1,139 −738 include/fmt/core.h
+152 −91 include/fmt/format-inl.h
+788 −1,081 include/fmt/format.h
+71 −44 include/fmt/os.h
+50 −22 include/fmt/ostream.h
+174 −201 include/fmt/printf.h
+145 −158 include/fmt/ranges.h
+37 −331 include/fmt/std.h
+33 −62 include/fmt/xchar.h
+29 −40 src/fmt.cc
+5 −1 src/format.cc
+62 −99 src/os.cc
+3 −3 support/Vagrantfile
+1 −0 support/bazel/.bazelrc
+1 −1 support/bazel/.bazelversion
+2 −2 support/bazel/BUILD.bazel
+4 −5 support/bazel/README.md
+1 −1 support/build.gradle
+54 −0 support/cmake/cxx14.cmake
+0 −7 support/rst2md.py
+3 −15 test/CMakeLists.txt
+1 −1 test/add-subdirectory-test/CMakeLists.txt
+1 −1 test/args-test.cc
+26 −386 test/chrono-test.cc
+2 −2 test/compile-error-test/CMakeLists.txt
+0 −1 test/compile-fp-test.cc
+40 −22 test/compile-test.cc
+239 −119 test/core-test.cc
+0 −2 test/enforce-checks-test.cc
+1 −1 test/find-package-test/CMakeLists.txt
+91 −27 test/format-impl-test.cc
+329 −285 test/format-test.cc
+1 −1 test/fuzzing/CMakeLists.txt
+0 −2 test/gtest-extra-test.cc
+6 −1 test/gtest-extra.h
+7 −1 test/gtest/CMakeLists.txt
+1 −1 test/gtest/gmock-gtest-all.cc
+2 −2 test/mock-allocator.h
+88 −36 test/module-test.cc
+56 −15 test/os-test.cc
+47 −16 test/ostream-test.cc
+7 −1 test/posix-mock-test.cc
+2 −0 test/posix-mock.h
+37 −13 test/printf-test.cc
+35 −138 test/ranges-test.cc
+1 −1 test/scan-test.cc
+14 −17 test/scan.h
+1 −1 test/static-export-test/CMakeLists.txt
+20 −198 test/std-test.cc
+6 −2 test/util.h
+55 −118 test/xchar-test.cc
302 changes: 0 additions & 302 deletions lib/kernels/src/cuda/aggregate_spec_kernels.cu

This file was deleted.

1 change: 1 addition & 0 deletions lib/op-attrs/include/op-attrs/ops/attention.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ struct MultiHeadAttentionAttrs {
req<float> dropout;
req<bool> bias, add_bias_kv, add_zero_attn;
};

FF_VISITABLE_STRUCT(MultiHeadAttentionAttrs,
embed_dim,
num_heads,
Expand Down
3 changes: 3 additions & 0 deletions lib/op-attrs/include/op-attrs/ops/batch_matmul.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@ FF_VISITABLE_STRUCT(BatchMatmulAttrs, a_seq_length_dim, b_seq_length_dim);

CHECK_VALID_OP_ATTR(BatchMatmulAttrs);

ParallelTensorShape get_output_shape(BatchMatmulAttrs const &,
ParallelTensorShape const &,
ParallelTensorShape const &);
} // namespace FlexFlow

#endif
3 changes: 2 additions & 1 deletion lib/op-attrs/include/op-attrs/ops/batch_norm.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@ struct BatchNormAttrs {
};
FF_VISITABLE_STRUCT(BatchNormAttrs, relu);

ParallelTensorShape get_output_shape(BatchNormAttrs const &);
ParallelTensorShape get_output_shape(BatchNormAttrs const &,
ParallelTensorShape const &);

CHECK_VALID_OP_ATTR(BatchNormAttrs);

Expand Down
3 changes: 3 additions & 0 deletions lib/op-attrs/include/op-attrs/ops/cast.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@ struct CastAttrs {
};
FF_VISITABLE_STRUCT(CastAttrs, dtype);

ParallelTensorShape get_output_shape(CastAttrs const &,
ParallelTensorShape const &);

CHECK_VALID_OP_ATTR(CastAttrs);
} // namespace FlexFlow

Expand Down
3 changes: 3 additions & 0 deletions lib/op-attrs/include/op-attrs/ops/combine.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ struct CombineAttrs {
FF_VISITABLE_STRUCT(CombineAttrs, combine_dim, combine_degree);
CHECK_VALID_OP_ATTR(CombineAttrs);

ParallelTensorShape get_output_shape(CombineAttrs const &,
ParallelTensorShape const &);

} // namespace FlexFlow

#endif
2 changes: 2 additions & 0 deletions lib/op-attrs/include/op-attrs/ops/concat.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ struct ConcatAttrs {
FF_VISITABLE_STRUCT(ConcatAttrs, axis, num_inputs);
CHECK_VALID_OP_ATTR(ConcatAttrs);

ParallelTensorShape get_output_shape(ConcatAttrs const &,
std::vector<ParallelTensorShape> const &);
} // namespace FlexFlow

#endif
3 changes: 3 additions & 0 deletions lib/op-attrs/include/op-attrs/ops/conv_2d.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ CHECK_VALID_OP_ATTR(Conv2DAttrs);
TensorShape get_kernel_shape(Conv2DAttrs const &, TensorShape const &);
TensorShape get_bias_shape(Conv2DAttrs const &, TensorShape const &);

ParallelTensorShape get_output_shape(Conv2DAttrs const &,
ParallelTensorShape const &);

} // namespace FlexFlow

#endif
3 changes: 3 additions & 0 deletions lib/op-attrs/include/op-attrs/ops/dropout.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@ struct DropoutAttrs {
FF_VISITABLE_STRUCT(DropoutAttrs, rate, seed);
CHECK_VALID_OP_ATTR(DropoutAttrs);

ParallelTensorShape get_output_shape(DropoutAttrs const &,
ParallelTensorShape const &);

} // namespace FlexFlow

#endif
Loading