import torch
import torch.nn as nn
# conv_BN_ReLu
# Conv -> BatchNorm -> LeakyReLU
class ConvolutionalLayer(nn.Module):
    """Basic building block: Conv2d followed by BatchNorm2d and LeakyReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        super(ConvolutionalLayer, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        norm = nn.BatchNorm2d(out_channels)
        activation = nn.LeakyReLU()
        self.sub_module = nn.Sequential(conv, norm, activation)

    def forward(self, x):
        return self.sub_module(x)
# 残差1x1 -> 3x3 -> 1x1
# Residual bottleneck: 1x1 -> 3x3 -> 1x1, plus identity shortcut.
class Residual(nn.Module):
    """Residual block; output has the same channel count as the input."""

    def __init__(self, in_channels, out_channels):
        super(Residual, self).__init__()
        stages = [
            ConvolutionalLayer(in_channels, out_channels, 1, 1, 0),   # 1x1 reduce
            ConvolutionalLayer(out_channels, out_channels, 3, 1, 1),  # 3x3 transform
            ConvolutionalLayer(out_channels, in_channels, 1, 1, 0),   # 1x1 restore
        ]
        self.sub_module = nn.Sequential(*stages)

    def forward(self, x):
        # Identity shortcut added to the bottleneck path.
        bottleneck = self.sub_module(x)
        return x + bottleneck
# 预处理,减小1/4
# Stem: reduces spatial size to 1/4 (stride-2 conv, residual, 2x2 max-pool).
class Preprocessing(nn.Module):
    """Quarter-resolution preprocessing stem."""

    def __init__(self, in_channels, out_channels):
        super(Preprocessing, self).__init__()
        stem = [
            ConvolutionalLayer(in_channels, out_channels, 3, 2, 1),  # spatial /2
            Residual(out_channels, out_channels),
            nn.MaxPool2d(2, 2),                                      # spatial /2 again
        ]
        self.sub_module = nn.Sequential(*stem)

    def forward(self, x):
        return self.sub_module(x)
# 下采样1/2
# Halve spatial resolution with a stride-2 convolution.
class DownSampling(nn.Module):
    """Stride-2 conv + LeakyReLU (note: no batch norm here, unlike ConvolutionalLayer)."""

    def __init__(self, in_channels, out_channels):
        super(DownSampling, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        activation = nn.LeakyReLU()
        self.layer = nn.Sequential(conv, activation)

    def forward(self, x):
        return self.layer(x)
# 上采样x2
# Double spatial resolution with a transposed convolution.
class UpSampling(nn.Module):
    """ConvTranspose2d(kernel=3, stride=2, padding=1, output_padding=1): exact 2x upsample."""

    def __init__(self, in_channels, out_channels):
        super(UpSampling, self).__init__()
        deconv = nn.ConvTranspose2d(in_channels, out_channels, 3, 2, 1, 1)
        self.layer = nn.Sequential(deconv)

    def forward(self, x):
        return self.layer(x)
class HourGlass(nn.Module):
    """One level of a recursive hourglass module.

    Two branches operate on the input and are merged by addition:
      * skip branch  : 1x1 conv + residuals at the current resolution;
      * lower branch : downsample -> (recurse or bottleneck) -> residuals
                       -> upsample back to the current resolution.

    Args:
        nChannels: list of channel counts indexed by recursion depth;
            nChannels[numReductions] is this level's input/output channel count.
        numReductions: number of remaining resolution halvings; the module
            recurses until it reaches 1.
        numModules: number of Residual blocks stacked at each stage.
    """

    def __init__(self, nChannels, numReductions=4, numModules=2):
        super(HourGlass, self).__init__()
        self.nChannels = nChannels
        self.numReductions = numReductions
        self.numModules = numModules
        # Skip branch: 1x1 conv to the next level's channel count, then residuals.
        skip = []
        skip.append(nn.Conv2d(self.nChannels[self.numReductions], self.nChannels[self.numReductions-1], 1, 1))
        for _ in range(self.numModules):
            skip.append(Residual(self.nChannels[self.numReductions-1], self.nChannels[self.numReductions-1]))
        self.skip = nn.Sequential(*skip)
        # Lower branch entry: halve the spatial resolution.
        self.down = DownSampling(self.nChannels[self.numReductions], self.nChannels[self.numReductions-1])
        # Residuals applied right after downsampling.
        afterpool = []
        for _ in range(self.numModules):
            afterpool.append(Residual(self.nChannels[self.numReductions-1], self.nChannels[self.numReductions-1]))
        self.afterpool = nn.Sequential(*afterpool)
        if numReductions > 1:
            # Recurse one level deeper with the same channel schedule.
            self.hg = HourGlass(self.nChannels, self.numReductions - 1, self.numModules)
        else:
            # Innermost level: a residual bottleneck instead of further recursion.
            num1res = []
            for _ in range(self.numModules):
                num1res.append(Residual(self.nChannels[self.numReductions-1], self.nChannels[self.numReductions-1]))
            self.num1res = nn.Sequential(*num1res)
        # After the inner part: 1x1 conv back to this level's channel count + residuals.
        lowers = []
        lowers.append(nn.Conv2d(self.nChannels[self.numReductions-1], self.nChannels[self.numReductions], 1, 1))
        for _ in range(self.numModules):
            lowers.append(Residual(self.nChannels[self.numReductions], self.nChannels[self.numReductions]))
        self.lowers = nn.Sequential(*lowers)
        # Upsample back to this level's resolution; note up outputs
        # nChannels[numReductions-1] channels, matching the skip branch.
        self.up = UpSampling(self.nChannels[self.numReductions], self.nChannels[self.numReductions-1])
        # Final 1x1 conv restores this level's channel count after the merge.
        self.merge = nn.Conv2d(self.nChannels[self.numReductions-1], self.nChannels[self.numReductions], 1, 1)

    def forward(self, x):
        out1 = self.skip(x)          # skip branch: residual stack at full resolution
        out2 = self.down(x)          # downsample to 1/2
        out2 = self.afterpool(out2)  # residual stack
        if self.numReductions > 1:
            out2 = self.hg(out2)     # recursive call, one level deeper
        else:
            out2 = self.num1res(out2)  # innermost residual bottleneck
        out2 = self.lowers(out2)     # residual stack
        out2 = self.up(out2)         # upsample x2 (requires even input size so shapes match)
        out2 = out1 + out2           # merge skip and lower branches
        out3 = self.merge(out2)
        return out3
class StackHourGlass(nn.Module):
    """Apply the SAME hourglass repeatedly (shared weights across stacks).

    Each stack produces a 1-channel prediction map; for all but the last
    stack, the map is projected back to feature channels and added to the
    input of the next stack. Returns the list of per-stack maps.
    """

    def __init__(self, stack_num, nChannels):
        super(StackHourGlass, self).__init__()
        self.stack_num = stack_num
        self.nChannels = nChannels
        # Channel schedule, indexed by recursion depth (innermost first).
        self.hg = HourGlass([nChannels * 8, nChannels * 4, nChannels * 2, nChannels, nChannels])
        # Project features down to a single-channel output map.
        self.res = ConvolutionalLayer(self.nChannels, 1, 1, 1, 0)
        # Lift the output map back up to feature channels for re-injection.
        self.joints = ConvolutionalLayer(1, self.nChannels, 1, 1, 0)

    def forward(self, x):
        outputs = []
        last = self.stack_num - 1
        for stage in range(self.stack_num):
            features = self.hg(x)
            prediction = self.res(features)
            outputs.append(prediction)
            # Feed the intermediate prediction back in, except after the last stack.
            if stage != last:
                x = x + self.joints(prediction)
        return outputs
class Main(nn.Module):
    """Full network: quarter-resolution preprocessing stem + stacked hourglass.

    Returns the stacked hourglass output: a list with one 1-channel map
    per stack.
    """

    def __init__(self, stack_num=2, nChannels=8):
        super(Main, self).__init__()
        self.pre_process = Preprocessing(in_channels=3, out_channels=nChannels)
        self.hg = StackHourGlass(stack_num, nChannels)

    def forward(self, x):
        features = self.pre_process(x)
        return self.hg(features)
if __name__ == "__main__":
hg = Main().cuda()
x = torch.randn((2, 3, 512, 512)).cuda()
out = hg(x)
print(out)
# StackHourGlass PyTorch implementation.
# (The trailing lines here were residue scraped from a blog page — article
# teasers, copyright footer, and "recommended reading" links. They were not
# code and broke parsing, so they have been converted into this comment.)