Skip to content

Commit 7d84727

Browse files
committed
doc v0.5 updates
1 parent f8ae4ad commit 7d84727

18 files changed

+375
-231
lines changed

MinkowskiEngine/MinkowskiCoordinateManager.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ def set_gpu_allocator(backend: GPUMemoryAllocatorType):
7171
than allocating GPU directly using raw CUDA calls.
7272
7373
By default, the Minkowski Engine uses
74-
:attr:`ME.MemoryManagerBackend.PYTORCH` for memory management.
74+
:attr:`ME.GPUMemoryAllocatorType.PYTORCH` for memory management.
7575
7676
Example::
7777
@@ -84,7 +84,7 @@ def set_gpu_allocator(backend: GPUMemoryAllocatorType):
8484
"""
8585
assert isinstance(
8686
backend, GPUMemoryAllocatorType
87-
), f"Input must be an instance of MemoryManagerBackend not {backend}"
87+
), f"Input must be an instance of GPUMemoryAllocatorType not {backend}"
8888
global _allocator_type
8989
_allocator_type = backend
9090

MinkowskiEngine/MinkowskiSparseTensor.py

+22-6
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,10 @@ class SparseTensor(Tensor):
8989
9090
To use the GPU-backend for coordinate management, the
9191
:attr:`coordinates` must be a torch tensor on GPU. Applying `to(device)`
92-
after a :attr:`MinkowskiEngine.SparseTensor` initialization with a CPU
93-
`coordinates` will waste time and computation for creating a CPU
94-
CoordinateMap since GPU CoordinateMap will be created from scratch.
92+
after :attr:`MinkowskiEngine.SparseTensor` initialization with a CPU
93+
`coordinates` will waste time and computation on creating an unnecessary
94+
CPU CoordinateMap since the GPU CoordinateMap will be created from
95+
scratch as well.
9596
9697
.. warning::
9798
@@ -145,6 +146,10 @@ def __init__(
145146
associated to the features. If not provided, :attr:`coordinate_map_key`
146147
must be provided.
147148
149+
:attr:`tensor_stride` (:attr:`int`, :attr:`list`,
150+
:attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
151+
of the current sparse tensor. By default, it is 1.
152+
148153
:attr:`coordinate_map_key`
149154
(:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
150155
are already cached in the MinkowskiEngine, we could reuse the same
@@ -164,11 +169,22 @@ def __init__(
164169
continuous coordinates will be quantized to define a sparse tensor.
165170
Please refer to :attr:`SparseTensorQuantizationMode` for details.
166171
172+
:attr:`allocator_type`
173+
(:attr:`MinkowskiEngine.GPUMemoryAllocatorType`): Defines the GPU
174+
memory allocator type. By default, it uses the c10 allocator.
175+
176+
:attr:`minkowski_algorithm`
177+
(:attr:`MinkowskiEngine.MinkowskiAlgorithm`): Controls the mode the
178+
    minkowski engine runs. Use
179+
:attr:`MinkowskiAlgorithm.MEMORY_EFFICIENT` if you want to reduce
180+
the memory footprint. Or use
181+
:attr:`MinkowskiAlgorithm.SPEED_OPTIMIZED` if you want to make it
182+
    run faster at the cost of more memory.
183+
167184
:attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.
168185
169-
:attr:`tensor_stride` (:attr:`int`, :attr:`list`,
170-
:attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
171-
of the current sparse tensor. By default, it is 1.
186+
:attr:`device` (:attr:`torch.device`): Set the device the sparse
187+
    tensor is defined on.
172188
173189
"""
174190
# Type checks

MinkowskiEngine/MinkowskiTensor.py

+10-1
Original file line numberDiff line numberDiff line change
@@ -99,17 +99,26 @@ def set_sparse_tensor_operation_mode(operation_mode: SparseTensorOperationMode):
9999
_sparse_tensor_operation_mode = operation_mode
100100

101101

102-
def sparse_tensor_operation_mode():
102+
def sparse_tensor_operation_mode() -> SparseTensorOperationMode:
103+
r"""Return the current sparse tensor operation mode.
104+
"""
103105
global _sparse_tensor_operation_mode
104106
return copy.deepcopy(_sparse_tensor_operation_mode)
105107

106108

107109
def global_coordinate_manager():
110+
r"""Return the current global coordinate manager
111+
"""
108112
global _global_coordinate_manager
109113
return _global_coordinate_manager
110114

111115

112116
def set_global_coordinate_manager(coordinate_manager):
117+
r"""Set the global coordinate manager.
118+
119+
:attr:`MinkowskiEngine.CoordinateManager` The coordinate manager which will
120+
be set to the global coordinate manager.
121+
"""
113122
global _global_coordinate_manager
114123
_global_coordinate_manager = coordinate_manager
115124

MinkowskiEngine/MinkowskiTensorField.py

+20-6
Original file line numberDiff line numberDiff line change
@@ -71,12 +71,16 @@ def __init__(
7171
associated to the features. If not provided, :attr:`coordinate_map_key`
7272
must be provided.
7373
74-
:attr:`coordinate_map_key`
74+
:attr:`tensor_stride` (:attr:`int`, :attr:`list`,
75+
:attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
76+
of the current sparse tensor. By default, it is 1.
77+
78+
:attr:`coordinate_field_map_key`
7579
(:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
7680
are already cached in the MinkowskiEngine, we could reuse the same
7781
coordinate map by simply providing the coordinate map key. In most
7882
case, this process is done automatically. When you provide a
79-
`coordinate_map_key`, `coordinates` will be be ignored.
83+
    `coordinate_field_map_key`, `coordinates` will be ignored.
8084
8185
:attr:`coordinate_manager`
8286
(:attr:`MinkowskiEngine.CoordinateManager`): The MinkowskiEngine
@@ -90,12 +94,22 @@ def __init__(
9094
continuous coordinates will be quantized to define a sparse tensor.
9195
Please refer to :attr:`SparseTensorQuantizationMode` for details.
9296
93-
:attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.
97+
:attr:`allocator_type`
98+
(:attr:`MinkowskiEngine.GPUMemoryAllocatorType`): Defines the GPU
99+
memory allocator type. By default, it uses the c10 allocator.
94100
95-
:attr:`tensor_stride` (:attr:`int`, :attr:`list`,
96-
:attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
97-
of the current sparse tensor. By default, it is 1.
101+
:attr:`minkowski_algorithm`
102+
(:attr:`MinkowskiEngine.MinkowskiAlgorithm`): Controls the mode the
103+
    minkowski engine runs. Use
104+
:attr:`MinkowskiAlgorithm.MEMORY_EFFICIENT` if you want to reduce
105+
the memory footprint. Or use
106+
:attr:`MinkowskiAlgorithm.SPEED_OPTIMIZED` if you want to make it
107+
    run faster at the cost of more memory.
108+
109+
:attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.
98110
111+
:attr:`device` (:attr:`torch.device`): Set the device the sparse
112+
    tensor is defined on.
99113
"""
100114
# Type checks
101115
assert isinstance(features, torch.Tensor), "Features must be a torch.Tensor"

MinkowskiEngine/__init__.py

+2
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,8 @@
7373
SparseTensorQuantizationMode,
7474
set_sparse_tensor_operation_mode,
7575
sparse_tensor_operation_mode,
76+
global_coordinate_manager,
77+
set_global_coordinate_manager,
7678
clear_global_coordinate_manager,
7779
)
7880

MinkowskiEngine/utils/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,5 +23,5 @@
2323
# of the code.
2424
from .quantization import sparse_quantize, ravel_hash_vec, fnv_hash_vec, unique_coordinate_map
2525
from .collation import SparseCollation, batched_coordinates, sparse_collate, batch_sparse_collate
26-
from .coords import get_coords_map
26+
# from .coords import get_coords_map
2727
from .init import kaiming_normal_

docs/benchmark.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Benchmark
22

3-
We report the feed forward and backward pass time of a convolution layer, and a small U-network. Note that the kernel map can be reused for other layers with the same tensor-stride, stride, and kernel offsets, thus the time reported in this page can be amortized across all layers used in a large nueral network.
3+
We report the feed forward and backward pass time of a convolution layer, and a small U-network for v0.4.3. Note that the kernel map can be reused for other layers with the same tensor-stride, stride, and kernel offsets, thus the time reported in this page can be amortized across all layers used in a large neural network.
44

55
We use a Titan X for the experiments.
66

docs/coords.rst

+10-10
Original file line numberDiff line numberDiff line change
@@ -1,32 +1,32 @@
11
Coordinate Management
22
=====================
33

4-
CoordsKey
5-
---------
4+
CoordinateMapKey
5+
----------------
66

7-
.. autoclass:: MinkowskiEngine.CoordsKey
7+
.. autoclass:: MinkowskiEngine.CoordinateMapKey
88
:members:
99
:undoc-members:
1010
:exclude-members: __repr__
1111

1212
.. automethod:: __init__
1313

1414

15-
CoordsManager
16-
-------------
15+
CoordinateManager
16+
-----------------
1717

18-
.. autoclass:: MinkowskiEngine.CoordsManager
18+
.. autoclass:: MinkowskiEngine.CoordinateManager
1919
:members:
2020
:undoc-members:
2121
:exclude-members: __repr__
2222

2323
.. automethod:: __init__
2424

2525

26-
Coordinate GPU Memory Manager
27-
-----------------------------
26+
GPU Memory Allocator
27+
--------------------
2828

29-
.. autoclass:: MinkowskiEngine.MemoryManagerBackend
29+
.. autoclass:: MinkowskiEngine.GPUMemoryAllocatorType
3030
:members:
3131

32-
.. autofunction:: MinkowskiEngine.MinkowskiCoords.set_memory_manager_backend
32+
.. autofunction:: MinkowskiEngine.set_gpu_allocator

docs/demo/interop.rst

+2-2
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ a min-batch.
3232
kernel_size=3,
3333
stride=2,
3434
dilation=1,
35-
has_bias=False,
35+
bias=False,
3636
dimension=D), ME.MinkowskiBatchNorm(64), ME.MinkowskiReLU(),
3737
ME.MinkowskiConvolution(
3838
in_channels=64,
@@ -66,7 +66,7 @@ accessing the features of the sparse tensor
6666
6767
# Get new data
6868
coords, feat, label = data_loader()
69-
input = ME.SparseTensor(feat, coords=coords).to(device)
69+
input = ME.SparseTensor(features=feat, coordinates=coords, device=device)
7070
label = label.to(device)
7171
7272
# Forward

0 commit comments

Comments
 (0)