**When this operator is called repeatedly during iterations, the GPU memory usage keeps increasing until the error "cuda 3D Allocation error: out of memory" occurs. This only happens on Ubuntu, not on Windows.**
```python
import torch
import torch.nn as nn
from leaptorch import Projector  # assuming LEAP's leaptorch wrapper provides Projector


class projector_t(nn.Module):
    def __init__(self):
        super(projector_t, self).__init__()

    def forward(self, sino, index, batchSize, d_angle=0):
        device_name = "cuda:1"
        device = torch.device(device_name)
        # A new Projector is constructed on every forward call
        proj = Projector(forward_project=False, use_static=True, use_gpu=True,
                         gpu_device=device, batch_size=batchSize)
        # proj = FBP(forward_FBP=True, use_static=True, use_gpu=True, gpu_device=device, batch_size=batchSize)

        numCols = 1024
        numTurns = 1024 / 1024
        numAngles = int(1024 * numTurns)
        pixelHeight = 1.12
        pixelWidth = 1.12
        # Set the number of detector rows
        numRows = 32
        phis = proj.leapct.setAngleArray(numAngles, 360.0 * numTurns)

        # Set the scanner geometry
        proj.leapct.set_conebeam(numAngles, numRows, numCols, pixelHeight, pixelWidth,
                                 0.5 * (numRows - 1), 0.5 * (numCols - 1) - 4.4912,
                                 phis + d_angle, 570, 1005, tau=0.0)
        proj.leapct.set_curvedDetector()
        # Set the helical pitch
        proj.leapct.set_normalizedHelicalPitch(1.6)

        # Set the reconstruction volume
        pixels = 512
        slices = 5
        voxelWidth = 0.6
        voxelHeight = 1.0
        proj.leapct.set_volume(pixels, pixels, slices, voxelWidth, voxelHeight)
        proj.allocate_batch_data()

        g = sino
        f = proj.leapct.allocateVolume()
        f = torch.from_numpy(f).to(device)
        # proj.leapct.set_FORBILD(f, True)
        f[:] = 0.0
        # Reconstruct the data
        proj.leapct.FBP(g, f)
        # f = proj(g)
        del sino, g, proj
        return f
```
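
For context, a minimal sketch of the repeated-call pattern described above, assuming a training-style loop. The sinogram shape, iteration count, and the `torch.cuda.mem_get_info` check are my own additions, not part of the original code; on the affected Ubuntu machine the in-use figure keeps growing each iteration until the "cuda 3D Allocation error: out of memory" appears.

```python
import torch

# Hypothetical driver loop (not from the original report): the sinogram shape and
# iteration count are assumptions chosen to match the geometry configured above.
device = torch.device("cuda:1")
projector = projector_t()

numAngles, numRows, numCols = 1024, 32, 1024
sino = torch.zeros((numAngles, numRows, numCols), dtype=torch.float32, device=device)

for it in range(50):
    f = projector(sino, index=0, batchSize=1)
    del f
    torch.cuda.synchronize(device)
    # mem_get_info reports free/total device memory via cudaMemGetInfo, so it also
    # reflects allocations made inside the LEAP C++/CUDA code, not just the PyTorch
    # caching allocator.
    free_b, total_b = torch.cuda.mem_get_info(device)
    print(f"iter {it}: {(total_b - free_b) / 2**20:.0f} MiB in use on {device}")
```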