@@ -507,9 +507,6 @@ def rand(*shape, device:str|None=None, dtype:DTypeLike|None=None, contiguous:bool
     if (numel := prod(shape)) == 0: return Tensor.zeros(shape, device=_device, dtype=dtype, **kwargs)
     num = ceildiv(numel * dtype.itemsize, 4)
 
-    # when using MOCKGPU and NV generate rand on CPU
-    if getenv("MOCKGPU") and device.startswith("NV"): device = "CPU"
-
     # generate per device seeds and rng counter if we haven't seen this device yet
     if device not in Tensor._device_seeds:
       Tensor._device_seeds[device] = Tensor(
@@ -532,12 +529,7 @@ def rand(*shape, device:str|None=None, dtype:DTypeLike|None=None, contiguous:bool
     one = Tensor.ones_like(bits, device=bits.device, dtype=dtype).bitcast(uint_dtype)
     bits = bits.rshift((dtype.itemsize * 8) - nmant).bitwise_or(one)
     # bitcast back to the original dtype and reshape
-    out = bits.bitcast(dtype)[:numel].sub(1).reshape(shape)
-
-    # move back to the original device if we were using MOCKGPU
-    if getenv("MOCKGPU") and _device: out = out.to(_device)
-
-    out.requires_grad = kwargs.get("requires_grad")
+    out = bits.bitcast(dtype)[:numel].sub(1).reshape(shape).requires_grad_(kwargs.get("requires_grad"))
     return out.contiguous() if contiguous else out
 
   # ***** creation helper functions *****
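For context, the lines kept by this hunk rely on the usual mantissa trick for turning raw random bits into uniform floats: shift the bits down so only the top mantissa-width bits remain, OR in the bit pattern of 1.0 so the exponent encodes the range [1, 2), bitcast back to float, then subtract 1 to land in [0, 1). Below is a minimal standalone sketch of that idea for float32; it uses NumPy and a hypothetical rand_uniform helper purely for illustration and is not tinygrad code.

# Illustrative sketch of the mantissa trick, assuming NumPy; not part of the diff.
import numpy as np

def rand_uniform(n: int, seed: int = 0) -> np.ndarray:
  rng = np.random.default_rng(seed)
  bits = rng.integers(0, 2**32, size=n, dtype=np.uint32)    # raw random 32-bit words
  nmant = 23                                                # float32 mantissa width
  one = np.array(1.0, dtype=np.float32).view(np.uint32)     # bit pattern of 1.0f
  mantissa = (bits >> np.uint32(32 - nmant)) | one          # exponent of 1.0, random mantissa
  return mantissa.view(np.float32) - np.float32(1.0)        # [1, 2) shifted down to [0, 1)

print(rand_uniform(4))  # four floats uniform in [0, 1)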