Skip to content

Commit b77535a

Browse files
0.1.3: fix a bug in GPU integration for DGL models
1 parent fc750f7 commit b77535a

File tree

2 files changed

+5
-1
lines changed

2 files changed

+5
-1
lines changed

DeepPurpose/encoders.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -351,6 +351,7 @@ def __init__(self, in_feats, hidden_feats=None, max_degree = None, activation=No
351351
self.transform = nn.Linear(predictor_hidden_size * 2, predictor_dim)
352352

353353
def forward(self, bg):
354+
bg = bg.to(device)
354355
feats = bg.ndata.pop('h')
355356
node_feats = self.gnn(bg, feats)
356357
node_feats = self.node_to_graph(node_feats)
@@ -373,6 +374,7 @@ def __init__(self, predictor_dim=None):
373374
self.transform = nn.Linear(300, predictor_dim)
374375

375376
def forward(self, bg):
377+
bg = bg.to(device)
376378
node_feats = [
377379
bg.ndata.pop('atomic_number'),
378380
bg.ndata.pop('chirality_type')
@@ -400,6 +402,7 @@ def __init__(self, predictor_dim=None):
400402
self.transform = nn.Linear(300, predictor_dim)
401403

402404
def forward(self, bg):
405+
bg = bg.to(device)
403406
node_feats = [
404407
bg.ndata.pop('atomic_number'),
405408
bg.ndata.pop('chirality_type')
@@ -432,6 +435,7 @@ def __init__(self, node_feat_size, edge_feat_size, num_layers = 2, num_timesteps
432435
self.transform = nn.Linear(graph_feat_size, predictor_dim)
433436

434437
def forward(self, bg):
438+
bg = bg.to(device)
435439
node_feats = bg.ndata.pop('h')
436440
edge_feats = bg.edata.pop('e')
437441

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ def readme():
1515
name="DeepPurpose",
1616
packages = ['DeepPurpose'],
1717
package_data={'DeepPurpose': ['ESPF/*']},
18-
version="0.1.2",
18+
version="0.1.3",
1919
author="Kexin Huang, Tianfan Fu",
2020
license="BSD-3-Clause",
2121
author_email="kexinhuang@hsph.harvard.edu",

0 commit comments

Comments (0)