1 change: 0 additions & 1 deletion tests/test_tensor/_utils/__init__.py

This file was deleted.

1 change: 1 addition & 0 deletions tests/test_tensor/common_utils/__init__.py
@@ -0,0 +1 @@
+from ._utils import *
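
The one-line __init__.py above is a re-export: "from ._utils import *" pulls the helpers defined in the private _utils module into the common_utils package namespace, which is what lets the call sites below switch to "from tests.test_tensor.common_utils import ...". Below is a minimal sketch of the pattern; the helper name is taken from the imports in this PR, but the stub body and the __all__ list are illustrative assumptions, not the repo's actual code.

# tests/test_tensor/common_utils/_utils.py (hypothetical stub)
import torch

# With __all__ defined, `from ._utils import *` re-exports exactly these
# names; without it, every public (non-underscore) name leaks through.
__all__ = ['tensor_equal']

def tensor_equal(t_a: torch.Tensor, t_b: torch.Tensor) -> bool:
    # Exact elementwise comparison, as the helper's name suggests.
    return torch.equal(t_a, t_b)

# tests/test_tensor/common_utils/__init__.py
# from ._utils import *

# Call sites can then import through the package path:
# from tests.test_tensor.common_utils import tensor_equal
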
@@ -1,7 +1,7 @@
import pytest

from functools import partial
-from _utils import tensor_equal, tensor_shard_equal, set_seed
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, set_seed

import torch
from torch.nn.parallel import DistributedDataParallel as DDP
@@ -1,7 +1,5 @@
import pytest
from functools import partial
-from _utils import tensor_shard_equal, set_seed
-
import torch
import torch.multiprocessing as mp

@@ -15,7 +13,8 @@
from colossalai.nn.optimizer import ColossalaiOptimizer

from tests.components_to_test.registry import non_distributed_component_funcs
-from _utils import split_param_row_tp1d, split_param_col_tp1d
+from tests.test_tensor.common_utils import tensor_shard_equal, check_equal, set_seed, \
+    split_param_row_tp1d, split_param_col_tp1d


def run_1d_hybrid_tp(model_name):
@@ -264,7 +263,6 @@ def run_1d_row_tp(model_name: str):


def _run_pretrain_load():
-    from _utils import check_equal
    from transformers import BertForMaskedLM
    set_seed(1)
    model_pretrained = BertForMaskedLM.from_pretrained('bert-base-uncased')
@@ -7,7 +7,7 @@

from colossalai.tensor import ColoTensor, ComputePattern, ComputeSpec, ShardSpec, ColoTensorSpec
from colossalai.nn.parallel.layers import init_colo_module, check_colo_module
-from _utils import tensor_equal, tensor_shard_equal, set_seed
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, set_seed

import colossalai
from colossalai.utils.cuda import get_current_device
@@ -8,7 +8,7 @@
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from functools import partial
-from _utils import tensor_shard_equal, tensor_equal, split_param_row_tp1d, split_param_col_tp1d
+from tests.test_tensor.common_utils import tensor_shard_equal, tensor_equal, split_param_row_tp1d, split_param_col_tp1d


class Conv1D(nn.Module):
@@ -8,7 +8,7 @@
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.tensor import ColoParameter, ColoTensorSpec, ProcessGroup
-from _utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d


def run_with_spec(spec_init_func):
@@ -8,7 +8,7 @@
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor
-from _utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d


def run_with_spec(spec_init_func, pg: ProcessGroup):
@@ -8,7 +8,7 @@
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor
-from _utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d
+from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d


def run_with_spec(spec_init_func, split_bias):
File renamed without changes.
2 changes: 1 addition & 1 deletion tests/test_tensor/test_parameter.py
@@ -1,7 +1,7 @@
from colossalai.tensor import ColoParameter, ColoTensor, ColoTensorSpec, ProcessGroup
import torch
import pytest
-from _utils import tensor_equal
+from common_utils import tensor_equal
import colossalai
from colossalai.utils import free_port

2 changes: 1 addition & 1 deletion tests/test_tensor/test_zero_optim.py
@@ -8,7 +8,7 @@
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.gemini import ChunkManager
from functools import partial
-from _utils import tensor_equal, set_seed, tensor_shard_equal
+from tests.test_tensor.common_utils import tensor_equal, set_seed, tensor_shard_equal
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from colossalai.nn.parallel import ZeroDDP