Comments (5)
Hi,
side_output_5
works at the largest scale of reconstructing the edges, and its quality depends on the deconvolution layers. I have used a very simple version of the deconv layers, but yes, they could be improved. I plan to update it soon.
from holy-edge.
Hi,
I tried to use slim to recode your work, but failed. side_1 was the same as yours, but side_4 and side_5 are too bad. Here is my code for 'side_layer' — what is the problem with this code? Can you help me?
def side_layer(self, inputs, scope, num_output):
    """Side-output layer: collapse `inputs` to a single channel with a 1x1
    conv, then upsample back by a factor of `num_output` with a transposed
    convolution.

    NOTE(review): in the original paste the kernel size read
    `[2num_output, 2num_output]` — markdown swallowed the `*` (the later,
    complete paste in this thread has `2*num_output`); restored here. The
    paste also never returned `side`; the return is restored as well.
    """
    # 1x1 projection to one channel; zero-initialised as in the HED paper.
    side = slim.conv2d(inputs, 1, [1, 1], stride=1,
                       activation_fn=None,
                       weights_initializer=tf.zeros_initializer(),
                       biases_initializer=tf.zeros_initializer(),
                       scope=scope + '_conv')
    # Learned upsampling: kernel = 2x stride, the usual transposed-conv
    # choice. Scope typo '_transopse' kept as-is — renaming it would
    # invalidate any existing checkpoints.
    side = slim.conv2d_transpose(side, 1, [2 * num_output, 2 * num_output],
                                 activation_fn=None,
                                 biases_initializer=None,
                                 stride=num_output,
                                 weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                 scope=scope + '_transopse')
    return side
from holy-edge.
Hi,
Could you please paste the complete code snippet here, formatted with GitHub's recommended code-block syntax? Also, could you explain what you are trying to improve and how? That way I can read through your code and give appropriate feedback.
from holy-edge.
Thanks. I want to use the slim library to rebuild your work. This is the architecture, but something is wrong — I cannot get the same results as yours.
class Vgg16():
    """VGG-16 backbone with HED-style side outputs (tf.contrib.slim version).

    Builds five VGG conv blocks; after each block a `side_layer` projects the
    features to one channel and upsamples them back to input resolution, and a
    final 1x1 conv fuses the five side outputs into a single edge map.
    """

    def __init__(self, cfgs, image, label):
        # NOTE(review): the paste read `def init` — markdown stripped the
        # dunder underscores; restored to __init__.
        self.image = image        # input image tensor, presumably NHWC — confirm against caller
        self.edgemaps = label     # ground-truth edge maps, same spatial size as image
        self.cfgs = cfgs          # config dict: uses 'deep_supervision' and 'loss_weights'
        self.define_model()

    def define_model(self):
        """Assemble the graph: five conv blocks, five side layers, one fuse layer."""
        weight_decay = 0.0002
        start_time = time.time()
        with slim.arg_scope([slim.conv2d],
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            activation_fn=tf.nn.relu,
                            ):
            with tf.variable_scope('hed_model'):
                self.conv1 = slim.repeat(self.image, 2, slim.conv2d, 64, kernel_size=3, stride=1, scope='conv1')
                self.side_1 = self.side_layer(self.conv1, 'side_1', 1)
                self.pool1 = slim.max_pool2d(self.conv1, kernel_size=2, scope='pool1')
                print('Added CONV-BLOCK-1+SIDE-1')
                self.conv2 = slim.repeat(self.pool1, 2, slim.conv2d, 128, kernel_size=3, scope='conv2')
                self.side_2 = self.side_layer(self.conv2, "side_2", 2)
                self.pool2 = slim.max_pool2d(self.conv2, kernel_size=2, scope='pool2')
                print('Added CONV-BLOCK-2+SIDE-2')
                self.conv3 = slim.repeat(self.pool2, 3, slim.conv2d, 256, kernel_size=3, scope='conv3')
                self.side_3 = self.side_layer(self.conv3, "side_3", 4)
                self.pool3 = slim.max_pool2d(self.conv3, kernel_size=2, scope='pool3')
                print('Added CONV-BLOCK-3+SIDE-3')
                self.conv4 = slim.repeat(self.pool3, 3, slim.conv2d, 512, kernel_size=3, scope='conv4')
                self.side_4 = self.side_layer(self.conv4, "side_4", 8)
                self.pool4 = slim.max_pool2d(self.conv4, kernel_size=2, scope='pool4')
                print('Added CONV-BLOCK-4+SIDE-4')
                self.conv5 = slim.repeat(self.pool4, 3, slim.conv2d, 512, kernel_size=3, scope='conv5')
                self.side_5 = self.side_layer(self.conv5, "side_5", 16)
                print('Added CONV-BLOCK-5+SIDE-5')
                self.side_outputs = [self.side_1, self.side_2, self.side_3, self.side_4, self.side_5]
                # Fuse layer: 1x1 conv over the concatenated side outputs;
                # weights start at 0.2 (~1/5) so it begins as their mean.
                self.fuse = slim.conv2d(tf.concat(self.side_outputs, axis=3),
                                        1, [1, 1], stride=1, scope='fuse_1',
                                        weights_initializer=tf.constant_initializer(0.2),
                                        biases_initializer=None)
                print('Added FUSE layer')
                # complete output maps from side layer and fuse layers
                self.outputs = self.side_outputs + [self.fuse]
                self.data_dict = None
                print("Build model finished: {:.4f}s".format(time.time() - start_time))

    def side_layer(self, inputs, scope, num_output):
        """1x1 conv to one channel, then transposed-conv upsampling by `num_output`."""
        with tf.variable_scope(scope):
            side = slim.conv2d(inputs, 1, [1, 1], stride=1,
                               activation_fn=None,
                               weights_initializer=tf.zeros_initializer(),
                               biases_initializer=tf.zeros_initializer(),
                               scope=scope + '_conv_1')
            # Kernel = 2x stride, standard for learned upsampling. Scope typo
            # '_transopse' kept: renaming it would break checkpoint restore.
            side = slim.conv2d_transpose(side, 1, [2 * num_output, 2 * num_output],
                                         activation_fn=None,
                                         biases_initializer=None,
                                         stride=num_output,
                                         weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                         scope=scope + '_transopse')
            return side

    def conv_layer(self, x, W_shape, b_shape=None, name=None,
                   padding='SAME', use_bias=True, w_init=None, b_init=None):
        """Plain tf.nn.conv2d with an optional bias; logs weight/bias histograms."""
        W = self.weight_variable(W_shape, w_init)
        tf.summary.histogram('weights_{}'.format(name), W)
        conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
        # BUGFIX(review): the paste returned `conv + b` unconditionally, which
        # raises NameError when use_bias=False; the bias is now only added
        # when it exists.
        if use_bias:
            b = self.bias_variable([b_shape], b_init)
            tf.summary.histogram('biases_{}'.format(name), b)
            return conv + b
        return conv

    def deconv_layer(self, x, upscale, name, padding='SAME', w_init=None):
        """Transposed convolution that upsamples `x` spatially by `upscale`."""
        x_shape = tf.shape(x)
        in_shape = x.shape.as_list()
        w_shape = [upscale * 2, upscale * 2, in_shape[-1], 1]
        strides = [1, upscale, upscale, 1]
        W = self.weight_variable(w_shape, w_init)
        tf.summary.histogram('weights_{}'.format(name), W)
        # BUGFIX(review): the output shape was hard-coded to [N, 400, 400, 1],
        # which only works for 400x400 inputs (and fails when the batch
        # dimension is None). Derive it dynamically instead: with
        # padding='SAME', a stride of `upscale` yields input_size * upscale.
        # (A leftover debug print of the input shape was also removed.)
        out_shape = tf.stack([x_shape[0], x_shape[1] * upscale, x_shape[2] * upscale, 1])
        deconv = tf.nn.conv2d_transpose(x, W, out_shape, strides=strides, padding=padding)
        return deconv

    def weight_variable(self, shape, initial):
        # `initial` is an initializer callable, e.g. tf.truncated_normal_initializer().
        init = initial(shape)
        return tf.Variable(init)

    def bias_variable(self, shape, initial):
        init = initial(shape)
        return tf.Variable(init)

    def add_dim(self, inputs, out_dim, scope):
        """1x1 conv used purely to change the channel count to `out_dim`."""
        return slim.conv2d(inputs, out_dim, [1, 1], stride=1, padding='SAME',
                           scope=scope)

    def setup_testing(self):
        """Sigmoid over every output map -> per-pixel edge probabilities."""
        self.predictions = []
        for idx, b in enumerate(self.outputs):
            output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
            self.predictions += [output]

    def setup_training(self):
        """Attach balanced cross-entropy losses, the pixel-error metric, and summaries."""
        self.predictions = []
        self.loss = 0
        print('Deep supervision application set to {}'.format(self.cfgs['deep_supervision']))
        for idx, b in enumerate(self.side_outputs):
            output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
            cost = sigmoid_cross_entropy_balanced(b, self.edgemaps, name='cross_entropy{}'.format(idx))
            self.predictions.append(output)
            # Side-output losses only contribute under deep supervision.
            if self.cfgs['deep_supervision']:
                self.loss += (self.cfgs['loss_weights'] * cost)
        fuse_output = tf.nn.sigmoid(self.fuse, name='fuse')
        fuse_cost = sigmoid_cross_entropy_balanced(self.fuse, self.edgemaps, name='cross_entropy_fuse')
        self.predictions.append(fuse_output)
        self.loss += (self.cfgs['loss_weights'] * fuse_cost)
        # Pixel error: fraction of pixels misclassified at threshold 0.5.
        pred = tf.cast(tf.greater(fuse_output, 0.5), tf.int32, name='predictions')
        error = tf.cast(tf.not_equal(pred, tf.cast(self.edgemaps, tf.int32)), tf.float32)
        self.error = tf.reduce_mean(error, name='pixel_error')
        tf.add_to_collection('losses', self.loss)
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('error', self.error)
def sigmoid_cross_entropy_balanced(logits, label, name='cross_entropy_loss'):
    """Class-balanced sigmoid cross-entropy (Equation [2] of the HED paper).

    Weights the positive (edge) class by beta / (1 - beta), where beta is the
    fraction of negative (non-edge) pixels, then scales the mean cost by
    (1 - beta). Returns 0 when the label contains no edge pixels, which would
    otherwise make pos_weight divide by zero.
    """
    y = tf.cast(label, tf.float32)
    count_neg = tf.reduce_sum(1. - y)
    count_pos = tf.reduce_sum(y)
    # Equation [2]
    beta = count_neg / (count_neg + count_pos)
    # Equation [2] divide by 1 - beta
    pos_weight = beta / (1 - beta)
    cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)
    # Multiply by 1 - beta
    cost = tf.reduce_mean(cost * (1 - beta))
    # check if image has no edge pixels return 0 else return complete error function
    return tf.where(tf.equal(count_pos, 0.0), 0.0, cost, name=name)
from holy-edge.
Also, could you add a precision-recall curve computation step to the testing phase?
from holy-edge.
Related Issues (20)
- About Loss Function HOT 1
- cannot clone pre-trained model file HOT 3
- Wrong loss function HOT 2
- Dimension Mismatch HOT 2
- Training does not converge HOT 3
- Error setting up VGG-16 model, Failed to interpret file '/home/xxx/holy-edge/hed/models/vgg16.npy' as a pickle HOT 3
- test error
- This repository is over its data quota. Purchase more data packs to restore access HOT 2
- Could anyone can provide a pretrained model in google drive or baiduyun? HOT 2
- a question about weight decay?
- why the loss changes a little like the picture bllow
- Does anyone have problems with Permission denied when running code with gpu?
- This backport is for Python 2.7 only HOT 1
- How to use the output model with Tensorflow lite for Mobile HOT 1
- Does the optimizer work when the loss is not a tensor?
- Is it true that you didn't train the parameters of VGG16? HOT 2
- How to test and output my own single image using the pretrained model
- Checkpoint can't be downloaded due to data limit reached on git lfs HOT 3
- Reupload the pretrained model?
- Requirements list Tensorflow 2, but code is in Tensorflow 1
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from holy-edge.