How can I convert a model that contains a GRU and operates on 4-D tensors?
error log
(venv) PS D:\pycharm_project> pnnx ./model.pt inputshape=[1,63,33,24]
pnnxparam = ./model.pnnx.param
pnnxbin = ./model.pnnx.bin
pnnxpy = ./model_pnnx.py
pnnxonnx = ./model.pnnx.onnx
ncnnparam = ./model.ncnn.param
ncnnbin = ./model.ncnn.bin
ncnnpy = ./model_ncnn.py
fp16 = 1
optlevel = 2
device = cpu
inputshape = [1,63,33,24]f32
inputshape2 =
customop =
moduleop =
############# pass_level0
############# pass_level5
############# pass_ncnn
insert_reshape_linear 4
stack along batch axis 1 is not supported
unbind along batch axis 1 is not supported
reshape tensor with batch index 1 is not supported yet!
reshape tensor with batch index 1 is not supported yet!
ignore Slice unbind_0 param dim=1
ignore Concat stack_0 param dim=1
model
- original model
import torch
import torch.nn as nn


class Model(nn.Module):
    def __init__(self, input_size, width, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.width = width
        self.hidden_size = hidden_size

        self.intra_fc = nn.Linear(hidden_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.intra_norm = nn.LayerNorm((width, hidden_size), eps=1e-8)

    def forward(self, x):
        intra_x = x  # (B, T, F, C)
        intra_x = self.intra_fc(intra_x)
        output = []
        batch_size, seq_len, _, _ = x.size()
        for i in range(seq_len):
            output.append(self.gru(intra_x[:, i, ...])[0])
        intra_x = torch.stack(output, dim=1)
        intra_x = self.intra_norm(intra_x)
        return intra_x


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # create data
    x = torch.randn(1, 24, 63, 33)

    # initialize model
    m = Model(24, 33, 24)
    # m_without_layernorm = Model_without_layernorm(24, 33, 24)  # not defined in this snippet and unused below

    y = x.permute(0, 2, 3, 1)
    out0 = m.forward(y)

    m.eval()
    model = torch.jit.trace(m.cpu(), (y), check_trace=True)
    pt_model_path = "model.pt"
    model.save(pt_model_path)
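The stack/unbind along dim 1 that pass_ncnn rejects comes from the Python loop in forward(): the per-index slices become an unbind and torch.stack becomes the Concat. As a possible workaround, here is my own sketch (ModelLoopFree is a hypothetical name, not from the pnnx docs, and I have not verified that it converts cleanly): since the model is traced with batch size 1, each self.gru(intra_x[:, i, ...]) call only sees a length-1 sequence, so the whole loop can be folded into a single GRU call by merging the T and F axes.

class ModelLoopFree(nn.Module):
    # same layers and parameter names as Model above; only forward() differs
    def __init__(self, input_size, width, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.width = width
        self.hidden_size = hidden_size
        self.intra_fc = nn.Linear(hidden_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.intra_norm = nn.LayerNorm((width, hidden_size), eps=1e-8)

    def forward(self, x):
        # x: (B, T, F, C), traced with B == 1
        b, t, f, c = x.size()
        intra_x = self.intra_fc(x)              # (B, T, F, C)
        intra_x = intra_x.reshape(b, t * f, c)  # (L=1, N=T*F, C) as seen by the GRU
        intra_x, _ = self.gru(intra_x)          # each (t, f) position still starts from h0 = 0
        intra_x = intra_x.reshape(b, t, f, c)
        intra_x = self.intra_norm(intra_x)
        return intra_x

For batch sizes other than 1 this is not equivalent to the original loop (the GRU would then treat the batch axis as a sequence), and I have not checked whether the extra reshape itself survives pass_ncnn.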
how to reproduce
- pnnx ./model.pt inputshape=[1,63,33,24]
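To sanity-check the loop-free sketch from the model section before re-running pnnx, this is how I would compare the two variants in PyTorch (again my own assumption, reusing ModelLoopFree from above):

# compare the original and the loop-free variant, then re-trace
x = torch.randn(1, 24, 63, 33)
y = x.permute(0, 2, 3, 1)                           # (B, T, F, C) = (1, 63, 33, 24)
m = Model(24, 33, 24).eval()
m2 = ModelLoopFree(24, 33, 24).eval()
m2.load_state_dict(m.state_dict())                  # same parameter names, so the weights carry over
with torch.no_grad():
    print(torch.allclose(m(y), m2(y), atol=1e-6))   # expect True for batch size 1
traced = torch.jit.trace(m2.cpu(), y, check_trace=True)
traced.save("model_loopfree.pt")

and then run pnnx again on model_loopfree.pt with the same inputshape=[1,63,33,24].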