TensorRT 5.1.5
CUDA 10.0
TensorFlow 1.13.1
TensorFlow 模型转 UFF 出错:
uff.model.exceptions.UffException: Transpose permutation has op ConcatV2, expected Const. Only constant permuations are supported in UFF.
但是根据官方文档
ConcatV2应该是支持的
UFF 的坑比较多。其实最简单、比较通用的方式是 ONNX -> TensorRT。
模型
class TFUnetCleanModel:
    """U-Net style encoder/decoder graph built with the TF 1.x layers API.

    The graph is assembled eagerly in ``__init__``: an NHWC float32
    placeholder is created, then ``layer_count`` down-sampling stages,
    a bottleneck, and ``layer_count`` up-sampling stages with skip
    connections. The final tensor is stored in ``self.predicts``.
    """

    def __init__(self, image_size, image_channel, n_class, layer_count):
        """Store hyper-parameters and build the graph immediately.

        :param image_size: spatial size of the (square) input image
        :param image_channel: number of input channels
        :param n_class: number of output classes
            # NOTE(review): stored but unused in the visible code — the
            # last convolution hard-codes 16 filters; confirm intent.
        :param layer_count: number of down/up-sampling stages
        """
        self.image_channel = image_channel
        self.n_class = n_class
        self.image_size = image_size
        self.init_weight()
        self.predicts = self.build_model(layer_count=layer_count)

    def init_weight(self):
        """Create the NHWC input placeholder ``self.image_feature``."""
        with tf.name_scope('inputs'):
            self.image_feature = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.image_channel],
                name='image_feature')

    def convolution(self, input_, num_filters, kernel_size):
        """Same-padded conv2d + ReLU, followed by batch normalization."""
        conv = tf.layers.conv2d(input_,
                                num_filters,
                                kernel_size,
                                padding="same", activation=tf.nn.relu)
        conv = tf.layers.batch_normalization(conv)
        return conv

    def max_pool(self, input_, pool_size, stride_size):
        """Same-padded 2-D max pooling."""
        return tf.layers.max_pooling2d(input_, pool_size, stride_size,
                                       padding='same')

    def upsample_and_concat(self, layer_upper, layer_down, output_channels):
        """Transposed-conv upsample of ``layer_upper`` (2x), then channel
        concatenation with the skip connection ``layer_down``."""
        deconv = tf.layers.conv2d_transpose(layer_upper, output_channels,
                                            kernel_size=(2, 2),
                                            strides=(2, 2))
        # Use an explicit positive channel axis (3 for NHWC) instead of -1.
        # Behavior is identical for these 4-D tensors, but a negative axis
        # is a known trigger for the UFF converter error quoted above
        # ("Transpose permutation has op ConcatV2, expected Const"): the
        # converter inserts a dynamic transpose it cannot fold to a Const.
        deconv_output = tf.concat([layer_down, deconv], axis=3)
        return deconv_output

    def build_model(self, layer_count, features_root=64):
        """
        Creates a new convolutional unet for the given parametrization.

        :param layer_count: number of layers in the net
        :param features_root: number of features in the first layer
        :return: the output tensor of the final convolution
        """
        last_down_input = self.image_feature
        down_layers = OrderedDict()
        # Encoder: double the filters and halve the spatial size per stage,
        # keeping each pre-pool tensor for the decoder's skip connections.
        for layer in range(layer_count):
            with tf.name_scope("down_conv_{}".format(str(layer))):
                num_filters = 2 ** layer * features_root
                last_down_input = self.convolution(last_down_input, num_filters, (3, 3))
                last_down_input = self.convolution(last_down_input, num_filters, (3, 3))
                down_layers[layer] = last_down_input
                last_down_input = self.max_pool(last_down_input,
                                                pool_size=(2, 2),
                                                stride_size=(2, 2))
                print("down_conv_{}.shape:{}".format(str(layer), last_down_input.get_shape()))
        # Bottleneck at the deepest resolution.
        num_filters = 2 ** layer_count * features_root
        last_up_input = self.convolution(last_down_input, num_filters, (3, 3))
        last_up_input = self.convolution(last_up_input, num_filters, (3, 3))
        print("last_up_input.shape:{}".format(last_up_input.get_shape()))
        # Decoder: upsample, concat with the matching encoder stage, convolve.
        for layer in range(layer_count, 0, -1):
            with tf.name_scope("up_conv_{}".format(str(layer - 1))):
                num_filters = 2 ** (layer - 1) * features_root
                last_up_input = self.upsample_and_concat(last_up_input,
                                                         down_layers[layer - 1],
                                                         num_filters)
                # Log with ``layer - 1`` so messages match the name_scope
                # above (the original printed ``layer``, off by one).
                print("up_sample_{}.shape:{}".format(str(layer - 1), last_up_input.get_shape()))
                last_up_input = self.convolution(last_up_input, num_filters, (3, 3))
                last_up_input = self.convolution(last_up_input, num_filters, (3, 3))
                # last_up_input = tf.nn.dropout(last_up_input, rate=1 - self.keep_prob)
                print("up_conv_{}.shape:{}".format(str(layer - 1), last_up_input.get_shape()))
        # NOTE(review): final layer emits 16 channels, not self.n_class —
        # presumably a later head maps 16 -> n_class; confirm against caller.
        last_up_input = self.convolution(last_up_input, 16, (3, 3))
        return last_up_input